   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include <linux/stddef.h>
  20#include <linux/errno.h>
  21#include <linux/gfp.h>
  22#include <linux/pagemap.h>
  23#include <linux/init.h>
  24#include <linux/vmalloc.h>
  25#include <linux/bio.h>
  26#include <linux/sysctl.h>
  27#include <linux/proc_fs.h>
  28#include <linux/workqueue.h>
  29#include <linux/percpu.h>
  30#include <linux/blkdev.h>
  31#include <linux/hash.h>
  32#include <linux/kthread.h>
  33#include <linux/migrate.h>
  34#include <linux/backing-dev.h>
  35#include <linux/freezer.h>
  36#include <linux/sched/mm.h>
  37
  38#include "xfs_format.h"
  39#include "xfs_log_format.h"
  40#include "xfs_trans_resv.h"
  41#include "xfs_sb.h"
  42#include "xfs_mount.h"
  43#include "xfs_trace.h"
  44#include "xfs_log.h"
  45#include "xfs_errortag.h"
  46#include "xfs_error.h"
  47
  48static kmem_zone_t *xfs_buf_zone;
  49
  50#ifdef XFS_BUF_LOCK_TRACKING
  51# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
  52# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
  53# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
  54#else
  55# define XB_SET_OWNER(bp)	do { } while (0)
  56# define XB_CLEAR_OWNER(bp)	do { } while (0)
  57# define XB_GET_OWNER(bp)	do { } while (0)
  58#endif
  59
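/*
 * Map buffer flags to an allocation mode: readahead allocations are purely
 * speculative, so they may fail quickly (__GFP_NORETRY), while normal buffer
 * allocations use GFP_NOFS to avoid recursing into the filesystem. Allocation
 * failure warnings are suppressed in both cases.
 */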
  60#define xb_to_gfp(flags) \
  61	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  62
  63
  64static inline int
  65xfs_buf_is_vmapped(
  66	struct xfs_buf	*bp)
  67{
  68	/*
  69	 * Return true if the buffer is vmapped.
  70	 *
  71	 * b_addr is null if the buffer is not mapped, but the code is clever
  72	 * enough to know it doesn't have to map a single page, so the check has
  73	 * to be both for b_addr and bp->b_page_count > 1.
  74	 */
  75	return bp->b_addr && bp->b_page_count > 1;
  76}
  77
  78static inline int
  79xfs_buf_vmap_len(
  80	struct xfs_buf	*bp)
  81{
  82	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  83}
  84
  85/*
  86 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
  87 * this buffer. The count is incremented once per buffer (per hold cycle)
  88 * because the corresponding decrement is deferred to buffer release. Buffers
  89 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
  90 * tracking adds unnecessary overhead. This is used for synchronization purposes
  91 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
  92 * in-flight buffers.
  93 *
  94 * Buffers that are never released (e.g., superblock, iclog buffers) must set
  95 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
  96 * never reaches zero and unmount hangs indefinitely.
  97 */
  98static inline void
  99xfs_buf_ioacct_inc(
 100	struct xfs_buf	*bp)
 101{
 102	if (bp->b_flags & XBF_NO_IOACCT)
 103		return;
 104
 105	ASSERT(bp->b_flags & XBF_ASYNC);
 106	spin_lock(&bp->b_lock);
 107	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
 108		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
 109		percpu_counter_inc(&bp->b_target->bt_io_count);
 110	}
 111	spin_unlock(&bp->b_lock);
 112}
 113
 114/*
 115 * Clear the in-flight state on a buffer about to be released to the LRU or
 116 * freed and unaccount from the buftarg.
 117 */
 118static inline void
 119__xfs_buf_ioacct_dec(
 120	struct xfs_buf	*bp)
 121{
 122	lockdep_assert_held(&bp->b_lock);
 123
 124	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
 125		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
 126		percpu_counter_dec(&bp->b_target->bt_io_count);
 127	}
 128}
 129
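/*
 * Locked wrapper around __xfs_buf_ioacct_dec() for callers that do not
 * already hold b_lock.
 */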
 130static inline void
 131xfs_buf_ioacct_dec(
 132	struct xfs_buf	*bp)
 133{
 134	spin_lock(&bp->b_lock);
 135	__xfs_buf_ioacct_dec(bp);
 136	spin_unlock(&bp->b_lock);
 137}
 138
 139/*
 140 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 141 * b_lru_ref count so that the buffer is freed immediately when the buffer
 142 * reference count falls to zero. If the buffer is already on the LRU, we need
 143 * to remove the reference that LRU holds on the buffer.
 144 *
 145 * This prevents build-up of stale buffers on the LRU.
 146 */
 147void
 148xfs_buf_stale(
 149	struct xfs_buf	*bp)
 150{
 151	ASSERT(xfs_buf_islocked(bp));
 152
 153	bp->b_flags |= XBF_STALE;
 154
 155	/*
 156	 * Clear the delwri status so that a delwri queue walker will not
 157	 * flush this buffer to disk now that it is stale. The delwri queue has
 158	 * a reference to the buffer, so this is safe to do.
 159	 */
 160	bp->b_flags &= ~_XBF_DELWRI_Q;
 161
 162	/*
 163	 * Once the buffer is marked stale and unlocked, a subsequent lookup
 164	 * could reset b_flags. There is no guarantee that the buffer is
 165	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 166	 * status now to preserve accounting consistency.
 167	 */
 168	spin_lock(&bp->b_lock);
 169	__xfs_buf_ioacct_dec(bp);
 170
 171	atomic_set(&bp->b_lru_ref, 0);
 172	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 173	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
 174		atomic_dec(&bp->b_hold);
 175
 176	ASSERT(atomic_read(&bp->b_hold) >= 1);
 177	spin_unlock(&bp->b_lock);
 178}
 179
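/*
 * Set up the buffer map array. Single-map buffers use the map embedded in the
 * xfs_buf itself; discontiguous (multi-map) buffers allocate a separate array.
 */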
 180static int
 181xfs_buf_get_maps(
 182	struct xfs_buf		*bp,
 183	int			map_count)
 184{
 185	ASSERT(bp->b_maps == NULL);
 186	bp->b_map_count = map_count;
 187
 188	if (map_count == 1) {
 189		bp->b_maps = &bp->__b_map;
 190		return 0;
 191	}
 192
 193	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
 194				KM_NOFS);
 195	if (!bp->b_maps)
 196		return -ENOMEM;
 197	return 0;
 198}
 199
 200/*
 201 *	Frees b_maps if it was allocated.
 202 */
 203static void
 204xfs_buf_free_maps(
 205	struct xfs_buf	*bp)
 206{
 207	if (bp->b_maps != &bp->__b_map) {
 208		kmem_free(bp->b_maps);
 209		bp->b_maps = NULL;
 210	}
 211}
 212
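/*
 * Allocate and initialise an xfs_buf for the given buffer target and map(s).
 * No data pages are allocated here; that is done separately by
 * xfs_buf_allocate_memory(). Returns NULL on allocation failure.
 */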
 213struct xfs_buf *
 214_xfs_buf_alloc(
 215	struct xfs_buftarg	*target,
 216	struct xfs_buf_map	*map,
 217	int			nmaps,
 218	xfs_buf_flags_t		flags)
 219{
 220	struct xfs_buf		*bp;
 221	int			error;
 222	int			i;
 223
 224	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 225	if (unlikely(!bp))
 226		return NULL;
 227
 228	/*
 229	 * We don't want certain flags to appear in b_flags unless they are
 230	 * specifically set by later operations on the buffer.
 231	 */
 232	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 233
 234	atomic_set(&bp->b_hold, 1);
 235	atomic_set(&bp->b_lru_ref, 1);
 236	init_completion(&bp->b_iowait);
 237	INIT_LIST_HEAD(&bp->b_lru);
 238	INIT_LIST_HEAD(&bp->b_list);
 239	INIT_LIST_HEAD(&bp->b_li_list);
 240	sema_init(&bp->b_sema, 0); /* held, no waiters */
 241	spin_lock_init(&bp->b_lock);
 242	XB_SET_OWNER(bp);
 243	bp->b_target = target;
 244	bp->b_flags = flags;
 245
 246	/*
 247	 * Set length and io_length to the same value initially.
 248	 * I/O routines should use io_length, which will be the same in
 249	 * most cases but may be reset (e.g. XFS recovery).
 250	 */
 251	error = xfs_buf_get_maps(bp, nmaps);
 252	if (error)  {
 253		kmem_zone_free(xfs_buf_zone, bp);
 254		return NULL;
 255	}
 256
 257	bp->b_bn = map[0].bm_bn;
 258	bp->b_length = 0;
 259	for (i = 0; i < nmaps; i++) {
 260		bp->b_maps[i].bm_bn = map[i].bm_bn;
 261		bp->b_maps[i].bm_len = map[i].bm_len;
 262		bp->b_length += map[i].bm_len;
 263	}
 264	bp->b_io_length = bp->b_length;
 265
 266	atomic_set(&bp->b_pin_count, 0);
 267	init_waitqueue_head(&bp->b_waiters);
 268
 269	XFS_STATS_INC(target->bt_mount, xb_create);
 270	trace_xfs_buf_init(bp, _RET_IP_);
 271
 272	return bp;
 273}
 274
 275/*
 276 *	Allocate a page array capable of holding a specified number
 277 *	of pages, and point the page buf at it.
 278 */
 279STATIC int
 280_xfs_buf_get_pages(
 281	xfs_buf_t		*bp,
 282	int			page_count)
 283{
 284	/* Make sure that we have a page list */
 285	if (bp->b_pages == NULL) {
 286		bp->b_page_count = page_count;
 287		if (page_count <= XB_PAGES) {
 288			bp->b_pages = bp->b_page_array;
 289		} else {
 290			bp->b_pages = kmem_alloc(sizeof(struct page *) *
 291						 page_count, KM_NOFS);
 292			if (bp->b_pages == NULL)
 293				return -ENOMEM;
 294		}
 295		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 296	}
 297	return 0;
 298}
 299
 300/*
 301 *	Frees b_pages if it was allocated.
 302 */
 303STATIC void
 304_xfs_buf_free_pages(
 305	xfs_buf_t	*bp)
 306{
 307	if (bp->b_pages != bp->b_page_array) {
 308		kmem_free(bp->b_pages);
 309		bp->b_pages = NULL;
 310	}
 311}
 312
 313/*
 314 *	Releases the specified buffer.
 315 *
 316 * 	The modification state of any associated pages is left unchanged.
 317 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 318 * 	hashed and refcounted buffers
 319 */
 320void
 321xfs_buf_free(
 322	xfs_buf_t		*bp)
 323{
 324	trace_xfs_buf_free(bp, _RET_IP_);
 325
 326	ASSERT(list_empty(&bp->b_lru));
 327
 328	if (bp->b_flags & _XBF_PAGES) {
 329		uint		i;
 330
 331		if (xfs_buf_is_vmapped(bp))
 332			vm_unmap_ram(bp->b_addr - bp->b_offset,
 333					bp->b_page_count);
 334
 335		for (i = 0; i < bp->b_page_count; i++) {
 336			struct page	*page = bp->b_pages[i];
 337
 338			__free_page(page);
 339		}
 340	} else if (bp->b_flags & _XBF_KMEM)
 341		kmem_free(bp->b_addr);
 342	_xfs_buf_free_pages(bp);
 343	xfs_buf_free_maps(bp);
 344	kmem_zone_free(xfs_buf_zone, bp);
 345}
 346
 347/*
 348 * Allocates all the pages for the buffer in question and builds its page list.
 349 */
 350STATIC int
 351xfs_buf_allocate_memory(
 352	xfs_buf_t		*bp,
 353	uint			flags)
 354{
 355	size_t			size;
 356	size_t			nbytes, offset;
 357	gfp_t			gfp_mask = xb_to_gfp(flags);
 358	unsigned short		page_count, i;
 359	xfs_off_t		start, end;
 360	int			error;
 361
 362	/*
 363	 * for buffers that are contained within a single page, just allocate
 364	 * the memory from the heap - there's no need for the complexity of
 365	 * page arrays to keep allocation down to order 0.
 366	 */
 367	size = BBTOB(bp->b_length);
 368	if (size < PAGE_SIZE) {
 369		bp->b_addr = kmem_alloc(size, KM_NOFS);
 370		if (!bp->b_addr) {
 371			/* low memory - use alloc_page loop instead */
 372			goto use_alloc_page;
 373		}
 374
 375		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
 376		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
 377			/* b_addr spans two pages - use alloc_page instead */
 378			kmem_free(bp->b_addr);
 379			bp->b_addr = NULL;
 380			goto use_alloc_page;
 381		}
 382		bp->b_offset = offset_in_page(bp->b_addr);
 383		bp->b_pages = bp->b_page_array;
 384		bp->b_pages[0] = virt_to_page(bp->b_addr);
 385		bp->b_page_count = 1;
 386		bp->b_flags |= _XBF_KMEM;
 387		return 0;
 388	}
 389
 390use_alloc_page:
 391	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 392	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 393								>> PAGE_SHIFT;
 394	page_count = end - start;
 395	error = _xfs_buf_get_pages(bp, page_count);
 396	if (unlikely(error))
 397		return error;
 398
 399	offset = bp->b_offset;
 400	bp->b_flags |= _XBF_PAGES;
 401
 402	for (i = 0; i < bp->b_page_count; i++) {
 403		struct page	*page;
 404		uint		retries = 0;
 405retry:
 406		page = alloc_page(gfp_mask);
 407		if (unlikely(page == NULL)) {
 408			if (flags & XBF_READ_AHEAD) {
 409				bp->b_page_count = i;
 410				error = -ENOMEM;
 411				goto out_free_pages;
 412			}
 413
 414			/*
 415			 * This could deadlock.
 416			 *
 417			 * But until all the XFS lowlevel code is revamped to
 418			 * handle buffer allocation failures we can't do much.
 419			 */
 420			if (!(++retries % 100))
 421				xfs_err(NULL,
 422		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
 423					current->comm, current->pid,
 424					__func__, gfp_mask);
 425
 426			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
 427			congestion_wait(BLK_RW_ASYNC, HZ/50);
 428			goto retry;
 429		}
 430
 431		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
 432
 433		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 434		size -= nbytes;
 435		bp->b_pages[i] = page;
 436		offset = 0;
 437	}
 438	return 0;
 439
 440out_free_pages:
 441	for (i = 0; i < bp->b_page_count; i++)
 442		__free_page(bp->b_pages[i]);
 443	bp->b_flags &= ~_XBF_PAGES;
 444	return error;
 445}
 446
 447/*
 448 *	Map buffer into kernel address-space if necessary.
 449 */
 450STATIC int
 451_xfs_buf_map_pages(
 452	xfs_buf_t		*bp,
 453	uint			flags)
 454{
 455	ASSERT(bp->b_flags & _XBF_PAGES);
 456	if (bp->b_page_count == 1) {
 457		/* A single page buffer is always mappable */
 458		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 459	} else if (flags & XBF_UNMAPPED) {
 460		bp->b_addr = NULL;
 461	} else {
 462		int retried = 0;
 463		unsigned nofs_flag;
 464
 465		/*
 466		 * vm_map_ram() will allocate auxiliary structures (e.g.
 467		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 468		 * GFP_NOFS context here. Hence we need to tell memory reclaim
 469		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
 470		 * memory reclaim re-entering the filesystem here and
 471		 * potentially deadlocking.
 472		 */
 473		nofs_flag = memalloc_nofs_save();
 474		do {
 475			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 476						-1, PAGE_KERNEL);
 477			if (bp->b_addr)
 478				break;
 479			vm_unmap_aliases();
 480		} while (retried++ <= 1);
 481		memalloc_nofs_restore(nofs_flag);
 482
 483		if (!bp->b_addr)
 484			return -ENOMEM;
 485		bp->b_addr += bp->b_offset;
 486	}
 487
 488	return 0;
 489}
 490
 491/*
 492 *	Finding and Reading Buffers
 493 */
 494static int
 495_xfs_buf_obj_cmp(
 496	struct rhashtable_compare_arg	*arg,
 497	const void			*obj)
 498{
 499	const struct xfs_buf_map	*map = arg->key;
 500	const struct xfs_buf		*bp = obj;
 501
 502	/*
 503	 * The key hashing in the lookup path depends on the key being the
 504	 * first element of the compare_arg, make sure to assert this.
 505	 */
 506	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
 507
 508	if (bp->b_bn != map->bm_bn)
 509		return 1;
 510
 511	if (unlikely(bp->b_length != map->bm_len)) {
 512		/*
 513		 * found a block number match. If the range doesn't
 514		 * match, the only way this is allowed is if the buffer
 515		 * in the cache is stale and the transaction that made
 516		 * it stale has not yet committed. i.e. we are
 517		 * reallocating a busy extent. Skip this buffer and
 518		 * continue searching for an exact match.
 519		 */
 520		ASSERT(bp->b_flags & XBF_STALE);
 521		return 1;
 522	}
 523	return 0;
 524}
 525
 526static const struct rhashtable_params xfs_buf_hash_params = {
 527	.min_size		= 32,	/* empty AGs have minimal footprint */
 528	.nelem_hint		= 16,
 529	.key_len		= sizeof(xfs_daddr_t),
 530	.key_offset		= offsetof(struct xfs_buf, b_bn),
 531	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
 532	.automatic_shrinking	= true,
 533	.obj_cmpfn		= _xfs_buf_obj_cmp,
 534};
 535
 536int
 537xfs_buf_hash_init(
 538	struct xfs_perag	*pag)
 539{
 540	spin_lock_init(&pag->pag_buf_lock);
 541	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
 542}
 543
 544void
 545xfs_buf_hash_destroy(
 546	struct xfs_perag	*pag)
 547{
 548	rhashtable_destroy(&pag->pag_buf_hash);
 549}
 550
 551/*
 552 *	Look up, and creates if absent, a lockable buffer for
 553 *	a given range of an inode.  The buffer is returned
 554 *	locked.	No I/O is implied by this call.
 555 */
 556xfs_buf_t *
 557_xfs_buf_find(
 558	struct xfs_buftarg	*btp,
 559	struct xfs_buf_map	*map,
 560	int			nmaps,
 561	xfs_buf_flags_t		flags,
 562	xfs_buf_t		*new_bp)
 563{
 564	struct xfs_perag	*pag;
 565	xfs_buf_t		*bp;
 566	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 567	xfs_daddr_t		eofs;
 568	int			i;
 569
 570	for (i = 0; i < nmaps; i++)
 571		cmap.bm_len += map[i].bm_len;
 572
 573	/* Check for IOs smaller than the sector size / not sector aligned */
 574	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
 575	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
 576
 577	/*
 578	 * Corrupted block numbers can get through to here, unfortunately, so we
 579	 * have to check that the buffer falls within the filesystem bounds.
 580	 */
 581	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 582	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
 583		/*
 584		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
 585		 * but none of the higher level infrastructure supports
 586		 * returning a specific error on buffer lookup failures.
 587		 */
 588		xfs_alert(btp->bt_mount,
 589			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
 590			  __func__, cmap.bm_bn, eofs);
 591		WARN_ON(1);
 592		return NULL;
 593	}
 594
 595	pag = xfs_perag_get(btp->bt_mount,
 596			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 597
 598	spin_lock(&pag->pag_buf_lock);
 599	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
 600				    xfs_buf_hash_params);
 601	if (bp) {
 602		atomic_inc(&bp->b_hold);
 603		goto found;
 604	}
 605
 606	/* No match found */
 607	if (new_bp) {
 608		/* the buffer keeps the perag reference until it is freed */
 609		new_bp->b_pag = pag;
 610		rhashtable_insert_fast(&pag->pag_buf_hash,
 611				       &new_bp->b_rhash_head,
 612				       xfs_buf_hash_params);
 613		spin_unlock(&pag->pag_buf_lock);
 614	} else {
 615		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 616		spin_unlock(&pag->pag_buf_lock);
 617		xfs_perag_put(pag);
 618	}
 619	return new_bp;
 620
 621found:
 622	spin_unlock(&pag->pag_buf_lock);
 623	xfs_perag_put(pag);
 624
 625	if (!xfs_buf_trylock(bp)) {
 626		if (flags & XBF_TRYLOCK) {
 627			xfs_buf_rele(bp);
 628			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
 629			return NULL;
 630		}
 631		xfs_buf_lock(bp);
 632		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
 633	}
 634
 635	/*
 636	 * if the buffer is stale, clear all the external state associated with
 637	 * it. We need to keep flags such as how we allocated the buffer memory
 638	 * intact here.
 639	 */
 640	if (bp->b_flags & XBF_STALE) {
 641		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 642		ASSERT(bp->b_iodone == NULL);
 643		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
 644		bp->b_ops = NULL;
 645	}
 646
 647	trace_xfs_buf_find(bp, flags, _RET_IP_);
 648	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
 649	return bp;
 650}
 651
 652/*
 653 * Assembles a buffer covering the specified range. The code is optimised for
 654 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 655 * more hits than misses.
 656 */
 657struct xfs_buf *
 658xfs_buf_get_map(
 659	struct xfs_buftarg	*target,
 660	struct xfs_buf_map	*map,
 661	int			nmaps,
 662	xfs_buf_flags_t		flags)
 663{
 664	struct xfs_buf		*bp;
 665	struct xfs_buf		*new_bp;
 666	int			error = 0;
 667
 668	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
 669	if (likely(bp))
 670		goto found;
 671
 672	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
 673	if (unlikely(!new_bp))
 674		return NULL;
 675
 676	error = xfs_buf_allocate_memory(new_bp, flags);
 677	if (error) {
 678		xfs_buf_free(new_bp);
 679		return NULL;
 680	}
 681
 682	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
 683	if (!bp) {
 684		xfs_buf_free(new_bp);
 685		return NULL;
 686	}
 687
 688	if (bp != new_bp)
 689		xfs_buf_free(new_bp);
 690
 691found:
 692	if (!bp->b_addr) {
 693		error = _xfs_buf_map_pages(bp, flags);
 694		if (unlikely(error)) {
 695			xfs_warn(target->bt_mount,
 696				"%s: failed to map pages", __func__);
 697			xfs_buf_relse(bp);
 698			return NULL;
 699		}
 700	}
 701
 702	/*
 703	 * Clear b_error if this is a lookup from a caller that doesn't expect
 704	 * valid data to be found in the buffer.
 705	 */
 706	if (!(flags & XBF_READ))
 707		xfs_buf_ioerror(bp, 0);
 708
 709	XFS_STATS_INC(target->bt_mount, xb_get);
 710	trace_xfs_buf_get(bp, flags, _RET_IP_);
 711	return bp;
 712}
 713
 714STATIC int
 715_xfs_buf_read(
 716	xfs_buf_t		*bp,
 717	xfs_buf_flags_t		flags)
 718{
 719	ASSERT(!(flags & XBF_WRITE));
 720	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 721
 722	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 723	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 724
 725	if (flags & XBF_ASYNC) {
 726		xfs_buf_submit(bp);
 727		return 0;
 728	}
 729	return xfs_buf_submit_wait(bp);
 730}
 731
 732xfs_buf_t *
 733xfs_buf_read_map(
 734	struct xfs_buftarg	*target,
 735	struct xfs_buf_map	*map,
 736	int			nmaps,
 737	xfs_buf_flags_t		flags,
 738	const struct xfs_buf_ops *ops)
 739{
 740	struct xfs_buf		*bp;
 741
 742	flags |= XBF_READ;
 743
 744	bp = xfs_buf_get_map(target, map, nmaps, flags);
 745	if (bp) {
 746		trace_xfs_buf_read(bp, flags, _RET_IP_);
 747
 748		if (!(bp->b_flags & XBF_DONE)) {
 749			XFS_STATS_INC(target->bt_mount, xb_get_read);
 750			bp->b_ops = ops;
 751			_xfs_buf_read(bp, flags);
 752		} else if (flags & XBF_ASYNC) {
 753			/*
 754			 * Read ahead call which is already satisfied,
 755			 * drop the buffer
 756			 */
 757			xfs_buf_relse(bp);
 758			return NULL;
 759		} else {
 760			/* We do not want read in the flags */
 761			bp->b_flags &= ~XBF_READ;
 762		}
 763	}
 764
 765	return bp;
 766}
 767
 768/*
 769 *	If we are not low on memory then do the readahead in a deadlock
 770 *	safe manner.
 771 */
 772void
 773xfs_buf_readahead_map(
 774	struct xfs_buftarg	*target,
 775	struct xfs_buf_map	*map,
 776	int			nmaps,
 777	const struct xfs_buf_ops *ops)
 778{
 779	if (bdi_read_congested(target->bt_bdev->bd_bdi))
 780		return;
 781
 782	xfs_buf_read_map(target, map, nmaps,
 783		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
 784}
 785
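/*
 * Typical cached read pattern using the map-based interfaces above, shown as
 * a minimal illustrative sketch only; blkno, numblks, btp and ops are
 * placeholders and not part of this file:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, ops);
 *	if (bp) {
 *		... access contents via bp->b_addr or xfs_buf_offset() ...
 *		xfs_buf_relse(bp);	(unlock and drop our hold)
 *	}
 */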
 786/*
 787 * Read an uncached buffer from disk. Allocates and returns a locked
 788 * buffer containing the disk contents or nothing.
 789 */
 790int
 791xfs_buf_read_uncached(
 792	struct xfs_buftarg	*target,
 793	xfs_daddr_t		daddr,
 794	size_t			numblks,
 795	int			flags,
 796	struct xfs_buf		**bpp,
 797	const struct xfs_buf_ops *ops)
 798{
 799	struct xfs_buf		*bp;
 800
 801	*bpp = NULL;
 802
 803	bp = xfs_buf_get_uncached(target, numblks, flags);
 804	if (!bp)
 805		return -ENOMEM;
 806
 807	/* set up the buffer for a read IO */
 808	ASSERT(bp->b_map_count == 1);
 809	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
 810	bp->b_maps[0].bm_bn = daddr;
 811	bp->b_flags |= XBF_READ;
 812	bp->b_ops = ops;
 813
 814	xfs_buf_submit_wait(bp);
 815	if (bp->b_error) {
 816		int	error = bp->b_error;
 817		xfs_buf_relse(bp);
 818		return error;
 819	}
 820
 821	*bpp = bp;
 822	return 0;
 823}
 824
 825/*
 826 * Return a buffer that was allocated as an empty buffer and associated with
 827 * external memory via xfs_buf_associate_memory() back to its empty state.
 828 */
 829void
 830xfs_buf_set_empty(
 831	struct xfs_buf		*bp,
 832	size_t			numblks)
 833{
 834	if (bp->b_pages)
 835		_xfs_buf_free_pages(bp);
 836
 837	bp->b_pages = NULL;
 838	bp->b_page_count = 0;
 839	bp->b_addr = NULL;
 840	bp->b_length = numblks;
 841	bp->b_io_length = numblks;
 842
 843	ASSERT(bp->b_map_count == 1);
 844	bp->b_bn = XFS_BUF_DADDR_NULL;
 845	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
 846	bp->b_maps[0].bm_len = bp->b_length;
 847}
 848
 849static inline struct page *
 850mem_to_page(
 851	void			*addr)
 852{
 853	if ((!is_vmalloc_addr(addr))) {
 854		return virt_to_page(addr);
 855	} else {
 856		return vmalloc_to_page(addr);
 857	}
 858}
 859
 860int
 861xfs_buf_associate_memory(
 862	xfs_buf_t		*bp,
 863	void			*mem,
 864	size_t			len)
 865{
 866	int			rval;
 867	int			i = 0;
 868	unsigned long		pageaddr;
 869	unsigned long		offset;
 870	size_t			buflen;
 871	int			page_count;
 872
 873	pageaddr = (unsigned long)mem & PAGE_MASK;
 874	offset = (unsigned long)mem - pageaddr;
 875	buflen = PAGE_ALIGN(len + offset);
 876	page_count = buflen >> PAGE_SHIFT;
 877
 878	/* Free any previous set of page pointers */
 879	if (bp->b_pages)
 880		_xfs_buf_free_pages(bp);
 881
 882	bp->b_pages = NULL;
 883	bp->b_addr = mem;
 884
 885	rval = _xfs_buf_get_pages(bp, page_count);
 886	if (rval)
 887		return rval;
 888
 889	bp->b_offset = offset;
 890
 891	for (i = 0; i < bp->b_page_count; i++) {
 892		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 893		pageaddr += PAGE_SIZE;
 894	}
 895
 896	bp->b_io_length = BTOBB(len);
 897	bp->b_length = BTOBB(buflen);
 898
 899	return 0;
 900}
 901
 902xfs_buf_t *
 903xfs_buf_get_uncached(
 904	struct xfs_buftarg	*target,
 905	size_t			numblks,
 906	int			flags)
 907{
 908	unsigned long		page_count;
 909	int			error, i;
 910	struct xfs_buf		*bp;
 911	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 912
 913	/* flags might contain irrelevant bits, pass only what we care about */
 914	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
 915	if (unlikely(bp == NULL))
 916		goto fail;
 917
 918	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
 919	error = _xfs_buf_get_pages(bp, page_count);
 920	if (error)
 921		goto fail_free_buf;
 922
 923	for (i = 0; i < page_count; i++) {
 924		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 925		if (!bp->b_pages[i])
 926			goto fail_free_mem;
 927	}
 928	bp->b_flags |= _XBF_PAGES;
 929
 930	error = _xfs_buf_map_pages(bp, 0);
 931	if (unlikely(error)) {
 932		xfs_warn(target->bt_mount,
 933			"%s: failed to map pages", __func__);
 934		goto fail_free_mem;
 935	}
 936
 937	trace_xfs_buf_get_uncached(bp, _RET_IP_);
 938	return bp;
 939
 940 fail_free_mem:
 941	while (--i >= 0)
 942		__free_page(bp->b_pages[i]);
 943	_xfs_buf_free_pages(bp);
 944 fail_free_buf:
 945	xfs_buf_free_maps(bp);
 946	kmem_zone_free(xfs_buf_zone, bp);
 947 fail:
 948	return NULL;
 949}
 950
 951/*
 952 *	Increment reference count on buffer, to hold the buffer concurrently
 953 *	with another thread which may release (free) the buffer asynchronously.
 954 *	Must hold the buffer already to call this function.
 955 */
 956void
 957xfs_buf_hold(
 958	xfs_buf_t		*bp)
 959{
 960	trace_xfs_buf_hold(bp, _RET_IP_);
 961	atomic_inc(&bp->b_hold);
 962}
 963
 964/*
 965 * Release a hold on the specified buffer. If the hold count is 1, the buffer is
 966 * placed on LRU or freed (depending on b_lru_ref).
 967 */
 968void
 969xfs_buf_rele(
 970	xfs_buf_t		*bp)
 971{
 972	struct xfs_perag	*pag = bp->b_pag;
 973	bool			release;
 974	bool			freebuf = false;
 975
 976	trace_xfs_buf_rele(bp, _RET_IP_);
 977
 978	if (!pag) {
 979		ASSERT(list_empty(&bp->b_lru));
 980		if (atomic_dec_and_test(&bp->b_hold)) {
 981			xfs_buf_ioacct_dec(bp);
 982			xfs_buf_free(bp);
 983		}
 984		return;
 985	}
 986
 987	ASSERT(atomic_read(&bp->b_hold) > 0);
 988
 989	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
 990	spin_lock(&bp->b_lock);
 991	if (!release) {
 992		/*
 993		 * Drop the in-flight state if the buffer is already on the LRU
 994		 * and it holds the only reference. This is racy because we
 995		 * haven't acquired the pag lock, but the use of XFS_BSTATE_IN_FLIGHT
 996		 * ensures the decrement occurs only once per-buf.
 997		 */
 998		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
 999			__xfs_buf_ioacct_dec(bp);
1000		goto out_unlock;
1001	}
1002
1003	/* the last reference has been dropped ... */
1004	__xfs_buf_ioacct_dec(bp);
1005	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1006		/*
1007		 * If the buffer is added to the LRU take a new reference to the
1008		 * buffer for the LRU and clear the (now stale) dispose list
1009		 * state flag
1010		 */
1011		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
1012			bp->b_state &= ~XFS_BSTATE_DISPOSE;
1013			atomic_inc(&bp->b_hold);
1014		}
1015		spin_unlock(&pag->pag_buf_lock);
1016	} else {
1017		/*
1018		 * most of the time buffers will already be removed from the
1019		 * LRU, so optimise that case by checking for the
1020		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
1021		 * was on was the disposal list
1022		 */
1023		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1024			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
1025		} else {
1026			ASSERT(list_empty(&bp->b_lru));
1027		}
1028
1029		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1030		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
1031				       xfs_buf_hash_params);
1032		spin_unlock(&pag->pag_buf_lock);
1033		xfs_perag_put(pag);
1034		freebuf = true;
1035	}
1036
1037out_unlock:
1038	spin_unlock(&bp->b_lock);
1039
1040	if (freebuf)
1041		xfs_buf_free(bp);
1042}
1043
1044
1045/*
1046 *	Lock a buffer object, if it is not already locked.
1047 *
1048 *	If we come across a stale, pinned, locked buffer, we know that we are
1049 *	being asked to lock a buffer that has been reallocated. Because it is
1050 *	pinned, we know that the log has not been pushed to disk and hence it
1051 *	will still be locked.  Rather than continuing to have trylock attempts
1052 *	fail until someone else pushes the log, push it ourselves before
1053 *	returning.  This means that the xfsaild will not get stuck trying
1054 *	to push on stale inode buffers.
1055 */
1056int
1057xfs_buf_trylock(
1058	struct xfs_buf		*bp)
1059{
1060	int			locked;
1061
1062	locked = down_trylock(&bp->b_sema) == 0;
1063	if (locked) {
1064		XB_SET_OWNER(bp);
1065		trace_xfs_buf_trylock(bp, _RET_IP_);
1066	} else {
1067		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1068	}
1069	return locked;
1070}
1071
1072/*
1073 *	Lock a buffer object.
1074 *
1075 *	If we come across a stale, pinned, locked buffer, we know that we
1076 *	are being asked to lock a buffer that has been reallocated. Because
1077 *	it is pinned, we know that the log has not been pushed to disk and
1078 *	hence it will still be locked. Rather than sleeping until someone
1079 *	else pushes the log, push it ourselves before trying to get the lock.
1080 */
1081void
1082xfs_buf_lock(
1083	struct xfs_buf		*bp)
1084{
1085	trace_xfs_buf_lock(bp, _RET_IP_);
1086
1087	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1088		xfs_log_force(bp->b_target->bt_mount, 0);
1089	down(&bp->b_sema);
1090	XB_SET_OWNER(bp);
1091
1092	trace_xfs_buf_lock_done(bp, _RET_IP_);
1093}
1094
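/*
 *	Unlock a buffer object. Releasing the buffer semaphore allows a waiter
 *	in xfs_buf_lock() to acquire the buffer.
 */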
1095void
1096xfs_buf_unlock(
1097	struct xfs_buf		*bp)
1098{
1099	ASSERT(xfs_buf_islocked(bp));
1100
1101	XB_CLEAR_OWNER(bp);
1102	up(&bp->b_sema);
1103
1104	trace_xfs_buf_unlock(bp, _RET_IP_);
1105}
1106
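/*
 *	Wait for the buffer's pin count to fall to zero before a write is
 *	issued, so that we do not write back a buffer the log still has pinned.
 */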
1107STATIC void
1108xfs_buf_wait_unpin(
1109	xfs_buf_t		*bp)
1110{
1111	DECLARE_WAITQUEUE	(wait, current);
1112
1113	if (atomic_read(&bp->b_pin_count) == 0)
1114		return;
1115
1116	add_wait_queue(&bp->b_waiters, &wait);
1117	for (;;) {
1118		set_current_state(TASK_UNINTERRUPTIBLE);
1119		if (atomic_read(&bp->b_pin_count) == 0)
1120			break;
1121		io_schedule();
1122	}
1123	remove_wait_queue(&bp->b_waiters, &wait);
1124	set_current_state(TASK_RUNNING);
1125}
1126
1127/*
1128 *	Buffer Utility Routines
1129 */
1130
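/*
 * I/O completion processing: fold any per-bio error into b_error, run the
 * read verifier on successful reads, then either invoke the attached b_iodone
 * handler, release the buffer (async I/O), or wake the synchronous waiter.
 */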
1131void
1132xfs_buf_ioend(
1133	struct xfs_buf	*bp)
1134{
1135	bool		read = bp->b_flags & XBF_READ;
1136
1137	trace_xfs_buf_iodone(bp, _RET_IP_);
1138
1139	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1140
1141	/*
1142	 * Pull in IO completion errors now. We are guaranteed to be running
1143	 * single threaded, so we don't need the lock to read b_io_error.
1144	 */
1145	if (!bp->b_error && bp->b_io_error)
1146		xfs_buf_ioerror(bp, bp->b_io_error);
1147
1148	/* Only validate buffers that were read without errors */
1149	if (read && !bp->b_error && bp->b_ops) {
1150		ASSERT(!bp->b_iodone);
1151		bp->b_ops->verify_read(bp);
1152	}
1153
1154	if (!bp->b_error)
1155		bp->b_flags |= XBF_DONE;
1156
1157	if (bp->b_iodone)
1158		(*(bp->b_iodone))(bp);
1159	else if (bp->b_flags & XBF_ASYNC)
1160		xfs_buf_relse(bp);
1161	else
1162		complete(&bp->b_iowait);
1163}
1164
1165static void
1166xfs_buf_ioend_work(
1167	struct work_struct	*work)
1168{
1169	struct xfs_buf		*bp =
1170		container_of(work, xfs_buf_t, b_ioend_work);
1171
1172	xfs_buf_ioend(bp);
1173}
1174
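/*
 * Defer completion processing to the buffer's I/O-end workqueue so that
 * verifiers and completion callbacks run in process context rather than
 * directly in the bio completion path.
 */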
1175static void
1176xfs_buf_ioend_async(
1177	struct xfs_buf	*bp)
1178{
1179	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1180	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
1181}
1182
1183void
1184__xfs_buf_ioerror(
1185	xfs_buf_t		*bp,
1186	int			error,
1187	xfs_failaddr_t		failaddr)
1188{
1189	ASSERT(error <= 0 && error >= -1000);
1190	bp->b_error = error;
1191	trace_xfs_buf_ioerror(bp, error, failaddr);
1192}
1193
1194void
1195xfs_buf_ioerror_alert(
1196	struct xfs_buf		*bp,
1197	const char		*func)
1198{
1199	xfs_alert(bp->b_target->bt_mount,
1200"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
1201			func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
1202			-bp->b_error);
1203}
1204
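/*
 * Write a buffer synchronously and wait for completion. A write error here
 * shuts the filesystem down, as a failed metadata write leaves the in-memory
 * and on-disk state inconsistent.
 */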
1205int
1206xfs_bwrite(
1207	struct xfs_buf		*bp)
1208{
1209	int			error;
1210
1211	ASSERT(xfs_buf_islocked(bp));
1212
1213	bp->b_flags |= XBF_WRITE;
1214	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1215			 XBF_WRITE_FAIL | XBF_DONE);
1216
1217	error = xfs_buf_submit_wait(bp);
1218	if (error) {
1219		xfs_force_shutdown(bp->b_target->bt_mount,
1220				   SHUTDOWN_META_IO_ERROR);
1221	}
1222	return error;
1223}
1224
1225static void
1226xfs_buf_bio_end_io(
1227	struct bio		*bio)
1228{
1229	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
1230
1231	/*
1232	 * don't overwrite existing errors - otherwise we can lose errors on
1233	 * buffers that require multiple bios to complete.
1234	 */
1235	if (bio->bi_status) {
1236		int error = blk_status_to_errno(bio->bi_status);
1237
1238		cmpxchg(&bp->b_io_error, 0, error);
1239	}
1240
1241	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1242		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1243
1244	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1245		xfs_buf_ioend_async(bp);
1246	bio_put(bio);
1247}
1248
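/*
 * Issue the I/O for a single map (one contiguous disk extent) of the buffer.
 * Large maps may need more than one bio; each bio submitted takes an extra
 * b_io_remaining reference that is dropped in xfs_buf_bio_end_io().
 */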
1249static void
1250xfs_buf_ioapply_map(
1251	struct xfs_buf	*bp,
1252	int		map,
1253	int		*buf_offset,
1254	int		*count,
1255	int		op,
1256	int		op_flags)
1257{
1258	int		page_index;
1259	int		total_nr_pages = bp->b_page_count;
1260	int		nr_pages;
1261	struct bio	*bio;
1262	sector_t	sector =  bp->b_maps[map].bm_bn;
1263	int		size;
1264	int		offset;
1265
1266	/* skip the pages in the buffer before the start offset */
1267	page_index = 0;
1268	offset = *buf_offset;
1269	while (offset >= PAGE_SIZE) {
1270		page_index++;
1271		offset -= PAGE_SIZE;
1272	}
1273
1274	/*
1275	 * Limit the IO size to the length of the current vector, and update the
1276	 * remaining IO count for the next time around.
1277	 */
1278	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1279	*count -= size;
1280	*buf_offset += size;
1281
1282next_chunk:
1283	atomic_inc(&bp->b_io_remaining);
1284	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
1285
1286	bio = bio_alloc(GFP_NOIO, nr_pages);
1287	bio_set_dev(bio, bp->b_target->bt_bdev);
1288	bio->bi_iter.bi_sector = sector;
1289	bio->bi_end_io = xfs_buf_bio_end_io;
1290	bio->bi_private = bp;
1291	bio_set_op_attrs(bio, op, op_flags);
1292
1293	for (; size && nr_pages; nr_pages--, page_index++) {
1294		int	rbytes, nbytes = PAGE_SIZE - offset;
1295
1296		if (nbytes > size)
1297			nbytes = size;
1298
1299		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1300				      offset);
1301		if (rbytes < nbytes)
1302			break;
1303
1304		offset = 0;
1305		sector += BTOBB(nbytes);
1306		size -= nbytes;
1307		total_nr_pages--;
1308	}
1309
1310	if (likely(bio->bi_iter.bi_size)) {
1311		if (xfs_buf_is_vmapped(bp)) {
1312			flush_kernel_vmap_range(bp->b_addr,
1313						xfs_buf_vmap_len(bp));
1314		}
1315		submit_bio(bio);
1316		if (size)
1317			goto next_chunk;
1318	} else {
1319		/*
1320		 * This is guaranteed not to be the last io reference count
1321		 * because the caller (xfs_buf_submit) holds a count itself.
1322		 */
1323		atomic_dec(&bp->b_io_remaining);
1324		xfs_buf_ioerror(bp, -EIO);
1325		bio_put(bio);
1326	}
1327
1328}
1329
1330STATIC void
1331_xfs_buf_ioapply(
1332	struct xfs_buf	*bp)
1333{
1334	struct blk_plug	plug;
1335	int		op;
1336	int		op_flags = 0;
1337	int		offset;
1338	int		size;
1339	int		i;
1340
1341	/*
1342	 * Make sure we capture only current IO errors rather than stale errors
1343	 * left over from previous use of the buffer (e.g. failed readahead).
1344	 */
1345	bp->b_error = 0;
1346
1347	/*
1348	 * Initialize the I/O completion workqueue if we haven't yet or the
1349	 * submitter has not opted to specify a custom one.
1350	 */
1351	if (!bp->b_ioend_wq)
1352		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1353
1354	if (bp->b_flags & XBF_WRITE) {
1355		op = REQ_OP_WRITE;
1356		if (bp->b_flags & XBF_SYNCIO)
1357			op_flags = REQ_SYNC;
1358		if (bp->b_flags & XBF_FUA)
1359			op_flags |= REQ_FUA;
1360		if (bp->b_flags & XBF_FLUSH)
1361			op_flags |= REQ_PREFLUSH;
1362
1363		/*
1364		 * Run the write verifier callback function if it exists. If
1365		 * this function fails it will mark the buffer with an error and
1366		 * the IO should not be dispatched.
1367		 */
1368		if (bp->b_ops) {
1369			bp->b_ops->verify_write(bp);
1370			if (bp->b_error) {
1371				xfs_force_shutdown(bp->b_target->bt_mount,
1372						   SHUTDOWN_CORRUPT_INCORE);
1373				return;
1374			}
1375		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1376			struct xfs_mount *mp = bp->b_target->bt_mount;
1377
1378			/*
1379			 * non-crc filesystems don't attach verifiers during
1380			 * log recovery, so don't warn for such filesystems.
1381			 */
1382			if (xfs_sb_version_hascrc(&mp->m_sb)) {
1383				xfs_warn(mp,
1384					"%s: no buf ops on daddr 0x%llx len %d",
1385					__func__, bp->b_bn, bp->b_length);
1386				xfs_hex_dump(bp->b_addr,
1387						XFS_CORRUPTION_DUMP_LEN);
1388				dump_stack();
1389			}
1390		}
1391	} else if (bp->b_flags & XBF_READ_AHEAD) {
1392		op = REQ_OP_READ;
1393		op_flags = REQ_RAHEAD;
1394	} else {
1395		op = REQ_OP_READ;
1396	}
1397
1398	/* we only use the buffer cache for meta-data */
1399	op_flags |= REQ_META;
1400
1401	/*
1402	 * Walk all the vectors issuing IO on them. Set up the initial offset
1403	 * into the buffer and the desired IO size before we start -
1404	 * xfs_buf_ioapply_map() will modify them appropriately for each
1405	 * subsequent call.
1406	 */
1407	offset = bp->b_offset;
1408	size = BBTOB(bp->b_io_length);
1409	blk_start_plug(&plug);
1410	for (i = 0; i < bp->b_map_count; i++) {
1411		xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
1412		if (bp->b_error)
1413			break;
1414		if (size <= 0)
1415			break;	/* all done */
1416	}
1417	blk_finish_plug(&plug);
1418}
1419
1420/*
1421 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1422 * the current reference to the IO. It is not safe to reference the buffer after
1423 * a call to this function unless the caller holds an additional reference
1424 * itself.
1425 */
1426void
1427xfs_buf_submit(
1428	struct xfs_buf	*bp)
1429{
1430	trace_xfs_buf_submit(bp, _RET_IP_);
1431
1432	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1433	ASSERT(bp->b_flags & XBF_ASYNC);
1434
1435	/* on shutdown we stale and complete the buffer immediately */
1436	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1437		xfs_buf_ioerror(bp, -EIO);
1438		bp->b_flags &= ~XBF_DONE;
1439		xfs_buf_stale(bp);
1440		xfs_buf_ioend(bp);
1441		return;
1442	}
1443
1444	if (bp->b_flags & XBF_WRITE)
1445		xfs_buf_wait_unpin(bp);
1446
1447	/* clear the internal error state to avoid spurious errors */
1448	bp->b_io_error = 0;
1449
1450	/*
1451	 * The caller's reference is released during I/O completion.
1452	 * This occurs some time after the last b_io_remaining reference is
1453	 * released, so after we drop our IO reference we have to have some
1454	 * other reference to ensure the buffer doesn't go away from underneath
1455	 * us. Take a direct reference to ensure we have safe access to the
1456	 * buffer until we are finished with it.
1457	 */
1458	xfs_buf_hold(bp);
1459
1460	/*
1461	 * Set the count to 1 initially, this will stop an I/O completion
1462	 * callout which happens before we have started all the I/O from calling
1463	 * xfs_buf_ioend too early.
1464	 */
1465	atomic_set(&bp->b_io_remaining, 1);
1466	xfs_buf_ioacct_inc(bp);
1467	_xfs_buf_ioapply(bp);
1468
1469	/*
1470	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1471	 * reference we took above. If we drop it to zero, run completion so
1472	 * that we don't return to the caller with completion still pending.
1473	 */
1474	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1475		if (bp->b_error)
1476			xfs_buf_ioend(bp);
1477		else
1478			xfs_buf_ioend_async(bp);
1479	}
1480
1481	xfs_buf_rele(bp);
1482	/* Note: it is not safe to reference bp now we've dropped our ref */
1483}
1484
1485/*
1486 * Synchronous buffer IO submission path, read or write.
1487 */
1488int
1489xfs_buf_submit_wait(
1490	struct xfs_buf	*bp)
1491{
1492	int		error;
1493
1494	trace_xfs_buf_submit_wait(bp, _RET_IP_);
1495
1496	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1497
1498	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1499		xfs_buf_ioerror(bp, -EIO);
1500		xfs_buf_stale(bp);
1501		bp->b_flags &= ~XBF_DONE;
1502		return -EIO;
1503	}
1504
1505	if (bp->b_flags & XBF_WRITE)
1506		xfs_buf_wait_unpin(bp);
1507
1508	/* clear the internal error state to avoid spurious errors */
1509	bp->b_io_error = 0;
1510
1511	/*
1512	 * For synchronous IO, the IO does not inherit the submitters reference
1513	 * count, nor the buffer lock. Hence we cannot release the reference we
1514	 * are about to take until we've waited for all IO completion to occur,
1515	 * including any xfs_buf_ioend_async() work that may be pending.
1516	 */
1517	xfs_buf_hold(bp);
1518
1519	/*
1520	 * Set the count to 1 initially, this will stop an I/O completion
1521	 * callout which happens before we have started all the I/O from calling
1522	 * xfs_buf_ioend too early.
1523	 */
1524	atomic_set(&bp->b_io_remaining, 1);
1525	_xfs_buf_ioapply(bp);
1526
1527	/*
1528	 * make sure we run completion synchronously if it raced with us and is
1529	 * already complete.
1530	 */
1531	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1532		xfs_buf_ioend(bp);
1533
1534	/* wait for completion before gathering the error from the buffer */
1535	trace_xfs_buf_iowait(bp, _RET_IP_);
1536	wait_for_completion(&bp->b_iowait);
1537	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1538	error = bp->b_error;
1539
1540	/*
1541	 * all done now, we can release the hold that keeps the buffer
1542	 * referenced for the entire IO.
1543	 */
1544	xfs_buf_rele(bp);
1545	return error;
1546}
1547
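/*
 * Return a kernel address for the given byte offset into the buffer. Mapped
 * buffers simply add the offset to b_addr; unmapped buffers are resolved
 * through the page array.
 */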
1548void *
1549xfs_buf_offset(
1550	struct xfs_buf		*bp,
1551	size_t			offset)
1552{
1553	struct page		*page;
1554
1555	if (bp->b_addr)
1556		return bp->b_addr + offset;
1557
1558	offset += bp->b_offset;
1559	page = bp->b_pages[offset >> PAGE_SHIFT];
1560	return page_address(page) + (offset & (PAGE_SIZE-1));
1561}
1562
1563/*
1564 *	Move data into or out of a buffer.
1565 */
1566void
1567xfs_buf_iomove(
1568	xfs_buf_t		*bp,	/* buffer to process		*/
1569	size_t			boff,	/* starting buffer offset	*/
1570	size_t			bsize,	/* length to copy		*/
1571	void			*data,	/* data address			*/
1572	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
1573{
1574	size_t			bend;
1575
1576	bend = boff + bsize;
1577	while (boff < bend) {
1578		struct page	*page;
1579		int		page_index, page_offset, csize;
1580
1581		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1582		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1583		page = bp->b_pages[page_index];
1584		csize = min_t(size_t, PAGE_SIZE - page_offset,
1585				      BBTOB(bp->b_io_length) - boff);
1586
1587		ASSERT((csize + page_offset) <= PAGE_SIZE);
1588
1589		switch (mode) {
1590		case XBRW_ZERO:
1591			memset(page_address(page) + page_offset, 0, csize);
1592			break;
1593		case XBRW_READ:
1594			memcpy(data, page_address(page) + page_offset, csize);
1595			break;
1596		case XBRW_WRITE:
1597			memcpy(page_address(page) + page_offset, data, csize);
1598		}
1599
1600		boff += csize;
1601		data += csize;
1602	}
1603}
1604
1605/*
1606 *	Handling of buffer targets (buftargs).
1607 */
1608
1609/*
1610 * Wait for any bufs with callbacks that have been submitted but have not yet
1611 * returned. These buffers will have an elevated hold count, so wait on those
1612 * while freeing all the buffers only held by the LRU.
1613 */
1614static enum lru_status
1615xfs_buftarg_wait_rele(
1616	struct list_head	*item,
1617	struct list_lru_one	*lru,
1618	spinlock_t		*lru_lock,
1619	void			*arg)
1620
1621{
1622	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1623	struct list_head	*dispose = arg;
1624
1625	if (atomic_read(&bp->b_hold) > 1) {
1626		/* need to wait, so skip it this pass */
1627		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1628		return LRU_SKIP;
1629	}
1630	if (!spin_trylock(&bp->b_lock))
1631		return LRU_SKIP;
1632
1633	/*
1634	 * clear the LRU reference count so the buffer doesn't get
1635	 * ignored in xfs_buf_rele().
1636	 */
1637	atomic_set(&bp->b_lru_ref, 0);
1638	bp->b_state |= XFS_BSTATE_DISPOSE;
1639	list_lru_isolate_move(lru, item, dispose);
1640	spin_unlock(&bp->b_lock);
1641	return LRU_REMOVED;
1642}
1643
1644void
1645xfs_wait_buftarg(
1646	struct xfs_buftarg	*btp)
1647{
1648	LIST_HEAD(dispose);
1649	int loop = 0;
1650
1651	/*
1652	 * First wait on the buftarg I/O count for all in-flight buffers to be
1653	 * released. This is critical as new buffers do not make the LRU until
1654	 * they are released.
1655	 *
1656	 * Next, flush the buffer workqueue to ensure all completion processing
1657	 * has finished. Just waiting on buffer locks is not sufficient for
1658	 * async IO as the reference count held over IO is not released until
1659	 * after the buffer lock is dropped. Hence we need to ensure here that
1660	 * all reference counts have been dropped before we start walking the
1661	 * LRU list.
1662	 */
1663	while (percpu_counter_sum(&btp->bt_io_count))
1664		delay(100);
1665	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1666
1667	/* loop until there is nothing left on the lru list. */
1668	while (list_lru_count(&btp->bt_lru)) {
1669		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1670			      &dispose, LONG_MAX);
1671
1672		while (!list_empty(&dispose)) {
1673			struct xfs_buf *bp;
1674			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1675			list_del_init(&bp->b_lru);
1676			if (bp->b_flags & XBF_WRITE_FAIL) {
1677				xfs_alert(btp->bt_mount,
1678"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
1679					(long long)bp->b_bn);
1680				xfs_alert(btp->bt_mount,
1681"Please run xfs_repair to determine the extent of the problem.");
1682			}
1683			xfs_buf_rele(bp);
1684		}
1685		if (loop++ != 0)
1686			delay(100);
1687	}
1688}
1689
1690static enum lru_status
1691xfs_buftarg_isolate(
1692	struct list_head	*item,
1693	struct list_lru_one	*lru,
1694	spinlock_t		*lru_lock,
1695	void			*arg)
1696{
1697	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1698	struct list_head	*dispose = arg;
1699
1700	/*
1701	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1702	 * If we fail to get the lock, just skip it.
1703	 */
1704	if (!spin_trylock(&bp->b_lock))
1705		return LRU_SKIP;
1706	/*
1707	 * Decrement the b_lru_ref count unless the value is already
1708	 * zero. If the value is already zero, we need to reclaim the
1709	 * buffer, otherwise it gets another trip through the LRU.
1710	 */
1711	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1712		spin_unlock(&bp->b_lock);
1713		return LRU_ROTATE;
1714	}
1715
1716	bp->b_state |= XFS_BSTATE_DISPOSE;
1717	list_lru_isolate_move(lru, item, dispose);
1718	spin_unlock(&bp->b_lock);
1719	return LRU_REMOVED;
1720}
1721
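/*
 * Shrinker callbacks: xfs_buftarg_shrink_scan() isolates reclaimable buffers
 * from the buftarg LRU onto a private dispose list and then drops the LRU
 * reference on each, while xfs_buftarg_shrink_count() reports the LRU size.
 */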
1722static unsigned long
1723xfs_buftarg_shrink_scan(
1724	struct shrinker		*shrink,
1725	struct shrink_control	*sc)
1726{
1727	struct xfs_buftarg	*btp = container_of(shrink,
1728					struct xfs_buftarg, bt_shrinker);
1729	LIST_HEAD(dispose);
1730	unsigned long		freed;
1731
1732	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1733				     xfs_buftarg_isolate, &dispose);
1734
1735	while (!list_empty(&dispose)) {
1736		struct xfs_buf *bp;
1737		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1738		list_del_init(&bp->b_lru);
1739		xfs_buf_rele(bp);
1740	}
1741
1742	return freed;
1743}
1744
1745static unsigned long
1746xfs_buftarg_shrink_count(
1747	struct shrinker		*shrink,
1748	struct shrink_control	*sc)
1749{
1750	struct xfs_buftarg	*btp = container_of(shrink,
1751					struct xfs_buftarg, bt_shrinker);
1752	return list_lru_shrink_count(&btp->bt_lru, sc);
1753}
1754
1755void
1756xfs_free_buftarg(
1757	struct xfs_buftarg	*btp)
1758{
1759	unregister_shrinker(&btp->bt_shrinker);
1760	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1761	percpu_counter_destroy(&btp->bt_io_count);
1762	list_lru_destroy(&btp->bt_lru);
1763
1764	xfs_blkdev_issue_flush(btp);
1765
1766	kmem_free(btp);
1767}
1768
1769int
1770xfs_setsize_buftarg(
1771	xfs_buftarg_t		*btp,
1772	unsigned int		sectorsize)
1773{
1774	/* Set up metadata sector size info */
1775	btp->bt_meta_sectorsize = sectorsize;
1776	btp->bt_meta_sectormask = sectorsize - 1;
1777
1778	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1779		xfs_warn(btp->bt_mount,
1780			"Cannot set_blocksize to %u on device %pg",
1781			sectorsize, btp->bt_bdev);
1782		return -EINVAL;
1783	}
1784
1785	/* Set up device logical sector size mask */
1786	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1787	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1788
1789	return 0;
1790}
1791
1792/*
1793 * When allocating the initial buffer target we have not yet
1794 * read in the superblock, so don't know what sized sectors
1795 * are being used at this early stage.  Play safe.
1796 */
1797STATIC int
1798xfs_setsize_buftarg_early(
1799	xfs_buftarg_t		*btp,
1800	struct block_device	*bdev)
1801{
1802	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1803}
1804
1805xfs_buftarg_t *
1806xfs_alloc_buftarg(
1807	struct xfs_mount	*mp,
1808	struct block_device	*bdev,
1809	struct dax_device	*dax_dev)
1810{
1811	xfs_buftarg_t		*btp;
1812
1813	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1814
1815	btp->bt_mount = mp;
1816	btp->bt_dev =  bdev->bd_dev;
1817	btp->bt_bdev = bdev;
1818	btp->bt_daxdev = dax_dev;
1819
1820	if (xfs_setsize_buftarg_early(btp, bdev))
1821		goto error_free;
1822
1823	if (list_lru_init(&btp->bt_lru))
1824		goto error_free;
1825
1826	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1827		goto error_lru;
1828
1829	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1830	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1831	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1832	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1833	if (register_shrinker(&btp->bt_shrinker))
1834		goto error_pcpu;
1835	return btp;
1836
1837error_pcpu:
1838	percpu_counter_destroy(&btp->bt_io_count);
1839error_lru:
1840	list_lru_destroy(&btp->bt_lru);
1841error_free:
1842	kmem_free(btp);
1843	return NULL;
1844}
1845
1846/*
1847 * Cancel a delayed write list.
1848 *
1849 * Remove each buffer from the list, clear the delwri queue flag and drop the
1850 * associated buffer reference.
1851 */
1852void
1853xfs_buf_delwri_cancel(
1854	struct list_head	*list)
1855{
1856	struct xfs_buf		*bp;
1857
1858	while (!list_empty(list)) {
1859		bp = list_first_entry(list, struct xfs_buf, b_list);
1860
1861		xfs_buf_lock(bp);
1862		bp->b_flags &= ~_XBF_DELWRI_Q;
1863		list_del_init(&bp->b_list);
1864		xfs_buf_relse(bp);
1865	}
1866}
1867
1868/*
1869 * Add a buffer to the delayed write list.
1870 *
1871 * This queues a buffer for writeout if it hasn't already been.  Note that
1872 * neither this routine nor the buffer list submission functions perform
1873 * any internal synchronization.  It is expected that the lists are thread-local
1874 * to the callers.
1875 *
1876 * Returns true if we queued up the buffer, or false if it already had
1877 * been on the buffer list.
1878 */
1879bool
1880xfs_buf_delwri_queue(
1881	struct xfs_buf		*bp,
1882	struct list_head	*list)
1883{
1884	ASSERT(xfs_buf_islocked(bp));
1885	ASSERT(!(bp->b_flags & XBF_READ));
1886
1887	/*
1888	 * If the buffer is already marked delwri it already is queued up
1889	 * by someone else for immediate writeout.  Just ignore it in that
1890	 * case.
1891	 */
1892	if (bp->b_flags & _XBF_DELWRI_Q) {
1893		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1894		return false;
1895	}
1896
1897	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1898
1899	/*
1900	 * If a buffer gets written out synchronously or marked stale while it
1901	 * is on a delwri list we lazily remove it. To do this, the other party
1902	 * clears the  _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1903	 * It remains referenced and on the list.  In a rare corner case it
1904	 * might get re-added to a delwri list after the synchronous writeout, in
1905	 * which case we just need to re-add the flag here.
1906	 */
1907	bp->b_flags |= _XBF_DELWRI_Q;
1908	if (list_empty(&bp->b_list)) {
1909		atomic_inc(&bp->b_hold);
1910		list_add_tail(&bp->b_list, list);
1911	}
1912
1913	return true;
1914}
1915
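/*
 * Typical delwri queue usage, shown as a minimal illustrative sketch only;
 * buffer acquisition and error handling are omitted and not part of this
 * file:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);	(bp must be locked)
 *	xfs_buf_relse(bp);			(the queue holds its own hold)
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */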
1916/*
1917 * Compare function is more complex than it needs to be because
1918 * the return value is only 32 bits and we are doing comparisons
1919 * on 64 bit values
1920 */
1921static int
1922xfs_buf_cmp(
1923	void		*priv,
1924	struct list_head *a,
1925	struct list_head *b)
1926{
1927	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
1928	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
1929	xfs_daddr_t		diff;
1930
1931	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1932	if (diff < 0)
1933		return -1;
1934	if (diff > 0)
1935		return 1;
1936	return 0;
1937}
1938
1939/*
1940 * submit buffers for write.
1941 *
1942 * When we have a large buffer list, we do not want to hold all the buffers
1943 * locked while we block on the request queue waiting for IO dispatch. To avoid
1944 * this problem, we lock and submit buffers in groups of 50, thereby minimising
1945 * the lock hold times for lists which may contain thousands of objects.
1946 *
1947 * To do this, we sort the buffer list before we walk the list to lock and
1948 * submit buffers, and we plug and unplug around each group of buffers we
1949 * submit.
1950 */
1951static int
1952xfs_buf_delwri_submit_buffers(
1953	struct list_head	*buffer_list,
1954	struct list_head	*wait_list)
1955{
1956	struct xfs_buf		*bp, *n;
1957	LIST_HEAD		(submit_list);
1958	int			pinned = 0;
1959	struct blk_plug		plug;
1960
1961	list_sort(NULL, buffer_list, xfs_buf_cmp);
1962
1963	blk_start_plug(&plug);
1964	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1965		if (!wait_list) {
1966			if (xfs_buf_ispinned(bp)) {
1967				pinned++;
1968				continue;
1969			}
1970			if (!xfs_buf_trylock(bp))
1971				continue;
1972		} else {
1973			xfs_buf_lock(bp);
1974		}
1975
1976		/*
1977		 * Someone else might have written the buffer synchronously or
1978		 * marked it stale in the meantime.  In that case only the
1979		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1980		 * reference and remove it from the list here.
1981		 */
1982		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1983			list_del_init(&bp->b_list);
1984			xfs_buf_relse(bp);
1985			continue;
1986		}
1987
1988		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1989
1990		/*
1991		 * We do all IO submission async. This means if we need
1992		 * to wait for IO completion we need to take an extra
1993		 * reference so the buffer is still valid on the other
1994		 * side. We need to move the buffer onto the io_list
1995		 * at this point so the caller can still access it.
1996		 */
1997		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
1998		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1999		if (wait_list) {
2000			xfs_buf_hold(bp);
2001			list_move_tail(&bp->b_list, wait_list);
2002		} else
2003			list_del_init(&bp->b_list);
2004
2005		xfs_buf_submit(bp);
2006	}
2007	blk_finish_plug(&plug);
2008
2009	return pinned;
2010}
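/*
 * Editorial sketch (not part of the original source): the block-layer
 * plugging pattern used above, in isolation.  I/O issued between
 * blk_start_plug() and blk_finish_plug() is held back and dispatched as a
 * batch when the plug is released, which is why the whole submission loop
 * sits inside a single plug.  submit_one() is a hypothetical stand-in for the
 * per-buffer submission.
 */
#if 0	/* illustrative, not compiled */
static void example_plugged_submission(struct list_head *buffers)
{
	struct xfs_buf	*bp, *n;
	struct blk_plug	plug;

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffers, b_list)
		submit_one(bp);			/* hypothetical per-buffer submit */
	blk_finish_plug(&plug);			/* dispatch the batched I/O */
}
#endif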
2011
2012/*
2013 * Write out a buffer list asynchronously.
2014 *
2015 * This will take the @buffer_list, write all non-locked and non-pinned buffers
2016 * out and not wait for I/O completion on any of the buffers.  This interface
2017 * is only safely useable for callers that can track I/O completion by higher
2018 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2019 * function.
2020 */
2021int
2022xfs_buf_delwri_submit_nowait(
2023	struct list_head	*buffer_list)
2024{
2025	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
 
2026}
2027
2028/*
2029 * Write out a buffer list synchronously.
2030 *
2031 * This will take the @buffer_list, write all buffers out and wait for I/O
2032 * completion on all of the buffers. @buffer_list is consumed by the function,
2033 * so callers must have some other way of tracking buffers if they require such
2034 * functionality.
2035 */
2036int
2037xfs_buf_delwri_submit(
2038	struct list_head	*buffer_list)
2039{
2040	LIST_HEAD		(wait_list);
2041	int			error = 0, error2;
2042	struct xfs_buf		*bp;
2043
2044	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
2045
2046	/* Wait for IO to complete. */
2047	while (!list_empty(&wait_list)) {
2048		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2049
2050		list_del_init(&bp->b_list);
2051
2052		/* locking the buffer will wait for async IO completion. */
2053		xfs_buf_lock(bp);
2054		error2 = bp->b_error;
2055		xfs_buf_relse(bp);
2056		if (!error)
2057			error = error2;
2058	}
2059
2060	return error;
2061}
2062
2063/*
2064 * Push a single buffer on a delwri queue.
2065 *
2066 * The purpose of this function is to submit a single buffer of a delwri queue
2067 * and return with the buffer still on the original queue. The waiting delwri
2068 * buffer submission infrastructure guarantees transfer of the delwri queue
2069 * buffer reference to a temporary wait list. We reuse this infrastructure to
2070 * transfer the buffer back to the original queue.
2071 *
2072 * Note the buffer transitions from the queued state, to the submitted and wait
2073 * listed state and back to the queued state during this call. The buffer
2074 * locking and queue management logic between _delwri_pushbuf() and
2075 * _delwri_queue() guarantee that the buffer cannot be queued to another list
2076 * before returning.
2077 */
2078int
2079xfs_buf_delwri_pushbuf(
2080	struct xfs_buf		*bp,
2081	struct list_head	*buffer_list)
2082{
2083	LIST_HEAD		(submit_list);
2084	int			error;
2085
2086	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2087
2088	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2089
2090	/*
2091	 * Isolate the buffer to a new local list so we can submit it for I/O
2092	 * independently from the rest of the original list.
2093	 */
2094	xfs_buf_lock(bp);
2095	list_move(&bp->b_list, &submit_list);
2096	xfs_buf_unlock(bp);
2097
2098	/*
2099	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2100	 * the buffer on the wait list with an associated reference. Rather than
2101	 * bounce the buffer from a local wait list back to the original list
2102	 * after I/O completion, reuse the original list as the wait list.
2103	 */
2104	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2105
2106	/*
2107	 * The buffer is now under I/O and wait listed as during typical delwri
2108	 * submission. Lock the buffer to wait for I/O completion. Rather than
2109	 * remove the buffer from the wait list and release the reference, we
2110	 * want to return with the buffer queued to the original list. The
2111	 * buffer already sits on the original list with a wait list reference,
2112	 * however. If we let the queue inherit that wait list reference, all we
2113	 * need to do is reset the DELWRI_Q flag.
2114	 */
2115	xfs_buf_lock(bp);
2116	error = bp->b_error;
2117	bp->b_flags |= _XBF_DELWRI_Q;
2118	xfs_buf_unlock(bp);
2119
2120	return error;
2121}
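/*
 * Editorial sketch (not part of the original source): minimal use of the
 * helper above.  The buffer is assumed to already carry _XBF_DELWRI_Q and to
 * sit on the caller's delwri queue; after the call it has been written out
 * but is still queued on that same list.
 */
#if 0	/* illustrative, not compiled */
static int example_push_one(struct xfs_buf *bp, struct list_head *delwri_list)
{
	return xfs_buf_delwri_pushbuf(bp, delwri_list);
}
#endif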
2122
2123int __init
2124xfs_buf_init(void)
2125{
2126	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
2127						KM_ZONE_HWALIGN, NULL);
2128	if (!xfs_buf_zone)
2129		goto out;
2130
2131	return 0;
2132
2133 out:
2134	return -ENOMEM;
2135}
2136
2137void
2138xfs_buf_terminate(void)
2139{
 
2140	kmem_zone_destroy(xfs_buf_zone);
2141}
2142
2143void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2144{
2145	/*
2146	 * Set the lru reference count to 0 based on the error injection tag.
2147	 * This allows userspace to disrupt buffer caching for debug/testing
2148	 * purposes.
2149	 */
2150	if (XFS_TEST_ERROR(false, bp->b_target->bt_mount,
2151			   XFS_ERRTAG_BUF_LRU_REF))
2152		lru_ref = 0;
2153
2154	atomic_set(&bp->b_lru_ref, lru_ref);
2155}
v3.15
   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include <linux/stddef.h>
  20#include <linux/errno.h>
  21#include <linux/gfp.h>
  22#include <linux/pagemap.h>
  23#include <linux/init.h>
  24#include <linux/vmalloc.h>
  25#include <linux/bio.h>
  26#include <linux/sysctl.h>
  27#include <linux/proc_fs.h>
  28#include <linux/workqueue.h>
  29#include <linux/percpu.h>
  30#include <linux/blkdev.h>
  31#include <linux/hash.h>
  32#include <linux/kthread.h>
  33#include <linux/migrate.h>
  34#include <linux/backing-dev.h>
  35#include <linux/freezer.h>
 
  36
 
  37#include "xfs_log_format.h"
  38#include "xfs_trans_resv.h"
  39#include "xfs_sb.h"
  40#include "xfs_ag.h"
  41#include "xfs_mount.h"
  42#include "xfs_trace.h"
  43#include "xfs_log.h"
  44
  45static kmem_zone_t *xfs_buf_zone;
  46
  47static struct workqueue_struct *xfslogd_workqueue;
  48
  49#ifdef XFS_BUF_LOCK_TRACKING
  50# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
  51# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
  52# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
  53#else
  54# define XB_SET_OWNER(bp)	do { } while (0)
  55# define XB_CLEAR_OWNER(bp)	do { } while (0)
  56# define XB_GET_OWNER(bp)	do { } while (0)
  57#endif
  58
  59#define xb_to_gfp(flags) \
  60	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  61
  62
  63static inline int
  64xfs_buf_is_vmapped(
  65	struct xfs_buf	*bp)
  66{
  67	/*
  68	 * Return true if the buffer is vmapped.
  69	 *
  70	 * b_addr is null if the buffer is not mapped, but the code is clever
  71	 * enough to know it doesn't have to map a single page, so the check has
  72	 * to be both for b_addr and bp->b_page_count > 1.
  73	 */
  74	return bp->b_addr && bp->b_page_count > 1;
  75}
  76
  77static inline int
  78xfs_buf_vmap_len(
  79	struct xfs_buf	*bp)
  80{
  81	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  82}
  83
  84/*
  85 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  86 * b_lru_ref count so that the buffer is freed immediately when the buffer
  87 * reference count falls to zero. If the buffer is already on the LRU, we need
  88 * to remove the reference that LRU holds on the buffer.
  89 *
  90 * This prevents build-up of stale buffers on the LRU.
  91 */
  92void
  93xfs_buf_stale(
  94	struct xfs_buf	*bp)
  95{
  96	ASSERT(xfs_buf_islocked(bp));
  97
  98	bp->b_flags |= XBF_STALE;
  99
 100	/*
 101	 * Clear the delwri status so that a delwri queue walker will not
 102	 * flush this buffer to disk now that it is stale. The delwri queue has
 103	 * a reference to the buffer, so this is safe to do.
 104	 */
 105	bp->b_flags &= ~_XBF_DELWRI_Q;
 106
 107	spin_lock(&bp->b_lock);
 108	atomic_set(&bp->b_lru_ref, 0);
 109	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 110	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
 111		atomic_dec(&bp->b_hold);
 112
 113	ASSERT(atomic_read(&bp->b_hold) >= 1);
 114	spin_unlock(&bp->b_lock);
 115}
 116
 117static int
 118xfs_buf_get_maps(
 119	struct xfs_buf		*bp,
 120	int			map_count)
 121{
 122	ASSERT(bp->b_maps == NULL);
 123	bp->b_map_count = map_count;
 124
 125	if (map_count == 1) {
 126		bp->b_maps = &bp->__b_map;
 127		return 0;
 128	}
 129
 130	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
 131				KM_NOFS);
 132	if (!bp->b_maps)
 133		return ENOMEM;
 134	return 0;
 135}
 136
 137/*
 138 *	Frees b_maps if it was allocated.
 139 */
 140static void
 141xfs_buf_free_maps(
 142	struct xfs_buf	*bp)
 143{
 144	if (bp->b_maps != &bp->__b_map) {
 145		kmem_free(bp->b_maps);
 146		bp->b_maps = NULL;
 147	}
 148}
 149
 150struct xfs_buf *
 151_xfs_buf_alloc(
 152	struct xfs_buftarg	*target,
 153	struct xfs_buf_map	*map,
 154	int			nmaps,
 155	xfs_buf_flags_t		flags)
 156{
 157	struct xfs_buf		*bp;
 158	int			error;
 159	int			i;
 160
 161	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 162	if (unlikely(!bp))
 163		return NULL;
 164
 165	/*
 166	 * We don't want certain flags to appear in b_flags unless they are
 167	 * specifically set by later operations on the buffer.
 168	 */
 169	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 170
 171	atomic_set(&bp->b_hold, 1);
 172	atomic_set(&bp->b_lru_ref, 1);
 173	init_completion(&bp->b_iowait);
 174	INIT_LIST_HEAD(&bp->b_lru);
 175	INIT_LIST_HEAD(&bp->b_list);
 176	RB_CLEAR_NODE(&bp->b_rbnode);
 177	sema_init(&bp->b_sema, 0); /* held, no waiters */
 178	spin_lock_init(&bp->b_lock);
 179	XB_SET_OWNER(bp);
 180	bp->b_target = target;
 181	bp->b_flags = flags;
 182
 183	/*
 184	 * Set length and io_length to the same value initially.
 185	 * I/O routines should use io_length, which will be the same in
 186	 * most cases but may be reset (e.g. XFS recovery).
 187	 */
 188	error = xfs_buf_get_maps(bp, nmaps);
 189	if (error)  {
 190		kmem_zone_free(xfs_buf_zone, bp);
 191		return NULL;
 192	}
 193
 194	bp->b_bn = map[0].bm_bn;
 195	bp->b_length = 0;
 196	for (i = 0; i < nmaps; i++) {
 197		bp->b_maps[i].bm_bn = map[i].bm_bn;
 198		bp->b_maps[i].bm_len = map[i].bm_len;
 199		bp->b_length += map[i].bm_len;
 200	}
 201	bp->b_io_length = bp->b_length;
 202
 203	atomic_set(&bp->b_pin_count, 0);
 204	init_waitqueue_head(&bp->b_waiters);
 205
 206	XFS_STATS_INC(xb_create);
 207	trace_xfs_buf_init(bp, _RET_IP_);
 208
 209	return bp;
 210}
 211
 212/*
 213 *	Allocate a page array capable of holding a specified number
 214 *	of pages, and point the page buf at it.
 215 */
 216STATIC int
 217_xfs_buf_get_pages(
 218	xfs_buf_t		*bp,
 219	int			page_count,
 220	xfs_buf_flags_t		flags)
 221{
 222	/* Make sure that we have a page list */
 223	if (bp->b_pages == NULL) {
 224		bp->b_page_count = page_count;
 225		if (page_count <= XB_PAGES) {
 226			bp->b_pages = bp->b_page_array;
 227		} else {
 228			bp->b_pages = kmem_alloc(sizeof(struct page *) *
 229						 page_count, KM_NOFS);
 230			if (bp->b_pages == NULL)
 231				return -ENOMEM;
 232		}
 233		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 234	}
 235	return 0;
 236}
 237
 238/*
 239 *	Frees b_pages if it was allocated.
 240 */
 241STATIC void
 242_xfs_buf_free_pages(
 243	xfs_buf_t	*bp)
 244{
 245	if (bp->b_pages != bp->b_page_array) {
 246		kmem_free(bp->b_pages);
 247		bp->b_pages = NULL;
 248	}
 249}
 250
 251/*
 252 *	Releases the specified buffer.
 253 *
 254 * 	The modification state of any associated pages is left unchanged.
 255 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 256 * 	hashed and refcounted buffers
 257 */
 258void
 259xfs_buf_free(
 260	xfs_buf_t		*bp)
 261{
 262	trace_xfs_buf_free(bp, _RET_IP_);
 263
 264	ASSERT(list_empty(&bp->b_lru));
 265
 266	if (bp->b_flags & _XBF_PAGES) {
 267		uint		i;
 268
 269		if (xfs_buf_is_vmapped(bp))
 270			vm_unmap_ram(bp->b_addr - bp->b_offset,
 271					bp->b_page_count);
 272
 273		for (i = 0; i < bp->b_page_count; i++) {
 274			struct page	*page = bp->b_pages[i];
 275
 276			__free_page(page);
 277		}
 278	} else if (bp->b_flags & _XBF_KMEM)
 279		kmem_free(bp->b_addr);
 280	_xfs_buf_free_pages(bp);
 281	xfs_buf_free_maps(bp);
 282	kmem_zone_free(xfs_buf_zone, bp);
 283}
 284
 285/*
 286 * Allocates all the pages for the buffer in question and builds its page list.
 287 */
 288STATIC int
 289xfs_buf_allocate_memory(
 290	xfs_buf_t		*bp,
 291	uint			flags)
 292{
 293	size_t			size;
 294	size_t			nbytes, offset;
 295	gfp_t			gfp_mask = xb_to_gfp(flags);
 296	unsigned short		page_count, i;
 297	xfs_off_t		start, end;
 298	int			error;
 299
 300	/*
 301	 * for buffers that are contained within a single page, just allocate
 302	 * the memory from the heap - there's no need for the complexity of
 303	 * page arrays to keep allocation down to order 0.
 304	 */
 305	size = BBTOB(bp->b_length);
 306	if (size < PAGE_SIZE) {
 307		bp->b_addr = kmem_alloc(size, KM_NOFS);
 308		if (!bp->b_addr) {
 309			/* low memory - use alloc_page loop instead */
 310			goto use_alloc_page;
 311		}
 312
 313		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
 314		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
 315			/* b_addr spans two pages - use alloc_page instead */
 316			kmem_free(bp->b_addr);
 317			bp->b_addr = NULL;
 318			goto use_alloc_page;
 319		}
 320		bp->b_offset = offset_in_page(bp->b_addr);
 321		bp->b_pages = bp->b_page_array;
 322		bp->b_pages[0] = virt_to_page(bp->b_addr);
 323		bp->b_page_count = 1;
 324		bp->b_flags |= _XBF_KMEM;
 325		return 0;
 326	}
 327
 328use_alloc_page:
 329	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 330	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 331								>> PAGE_SHIFT;
 332	page_count = end - start;
 333	error = _xfs_buf_get_pages(bp, page_count, flags);
 334	if (unlikely(error))
 335		return error;
 336
 337	offset = bp->b_offset;
 338	bp->b_flags |= _XBF_PAGES;
 339
 340	for (i = 0; i < bp->b_page_count; i++) {
 341		struct page	*page;
 342		uint		retries = 0;
 343retry:
 344		page = alloc_page(gfp_mask);
 345		if (unlikely(page == NULL)) {
 346			if (flags & XBF_READ_AHEAD) {
 347				bp->b_page_count = i;
 348				error = ENOMEM;
 349				goto out_free_pages;
 350			}
 351
 352			/*
 353			 * This could deadlock.
 354			 *
 355			 * But until all the XFS lowlevel code is revamped to
 356			 * handle buffer allocation failures we can't do much.
 357			 */
 358			if (!(++retries % 100))
 359				xfs_err(NULL,
 360		"possible memory allocation deadlock in %s (mode:0x%x)",
 
 361					__func__, gfp_mask);
 362
 363			XFS_STATS_INC(xb_page_retries);
 364			congestion_wait(BLK_RW_ASYNC, HZ/50);
 365			goto retry;
 366		}
 367
 368		XFS_STATS_INC(xb_page_found);
 369
 370		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 371		size -= nbytes;
 372		bp->b_pages[i] = page;
 373		offset = 0;
 374	}
 375	return 0;
 376
 377out_free_pages:
 378	for (i = 0; i < bp->b_page_count; i++)
 379		__free_page(bp->b_pages[i]);
 
 380	return error;
 381}
 382
 383/*
 384 *	Map buffer into kernel address-space if necessary.
 385 */
 386STATIC int
 387_xfs_buf_map_pages(
 388	xfs_buf_t		*bp,
 389	uint			flags)
 390{
 391	ASSERT(bp->b_flags & _XBF_PAGES);
 392	if (bp->b_page_count == 1) {
 393		/* A single page buffer is always mappable */
 394		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 395	} else if (flags & XBF_UNMAPPED) {
 396		bp->b_addr = NULL;
 397	} else {
 398		int retried = 0;
 399		unsigned noio_flag;
 400
 401		/*
 402		 * vm_map_ram() will allocate auxiliary structures (e.g.
 403		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 404		 * GFP_NOFS context here. Hence we need to tell memory reclaim
 405		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
 406		 * memory reclaim re-entering the filesystem here and
 407		 * potentially deadlocking.
 408		 */
 409		noio_flag = memalloc_noio_save();
 410		do {
 411			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 412						-1, PAGE_KERNEL);
 413			if (bp->b_addr)
 414				break;
 415			vm_unmap_aliases();
 416		} while (retried++ <= 1);
 417		memalloc_noio_restore(noio_flag);
 418
 419		if (!bp->b_addr)
 420			return -ENOMEM;
 421		bp->b_addr += bp->b_offset;
 422	}
 423
 424	return 0;
 425}
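/*
 * Editorial sketch (not part of the original source): the PF_MEMALLOC_NOIO
 * save/restore pattern used above, reduced to its core.  Allocations made
 * between the two calls are treated as GFP_NOIO by reclaim, which is how the
 * vm_map_ram() call avoids re-entering the filesystem.  do_allocations() is a
 * hypothetical stand-in for the allocating work.
 */
#if 0	/* illustrative, not compiled */
static void example_noio_scope(void)
{
	unsigned	noio_flag;

	noio_flag = memalloc_noio_save();	/* mark this task as NOIO */
	do_allocations();			/* hypothetical allocating work */
	memalloc_noio_restore(noio_flag);	/* restore the previous state */
}
#endif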
 426
 427/*
 428 *	Finding and Reading Buffers
 429 */
 430
 431/*
 432 *	Looks up, and creates if absent, a lockable buffer for
 433 *	a given range of an inode.  The buffer is returned
 434 *	locked.	No I/O is implied by this call.
 435 */
 436xfs_buf_t *
 437_xfs_buf_find(
 438	struct xfs_buftarg	*btp,
 439	struct xfs_buf_map	*map,
 440	int			nmaps,
 441	xfs_buf_flags_t		flags,
 442	xfs_buf_t		*new_bp)
 443{
 444	size_t			numbytes;
 445	struct xfs_perag	*pag;
 446	struct rb_node		**rbp;
 447	struct rb_node		*parent;
 448	xfs_buf_t		*bp;
 449	xfs_daddr_t		blkno = map[0].bm_bn;
 450	xfs_daddr_t		eofs;
 451	int			numblks = 0;
 452	int			i;
 453
 454	for (i = 0; i < nmaps; i++)
 455		numblks += map[i].bm_len;
 456	numbytes = BBTOB(numblks);
 457
 458	/* Check for IOs smaller than the sector size / not sector aligned */
 459	ASSERT(!(numbytes < btp->bt_meta_sectorsize));
 460	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
 461
 462	/*
 463	 * Corrupted block numbers can get through to here, unfortunately, so we
 464	 * have to check that the buffer falls within the filesystem bounds.
 465	 */
 466	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 467	if (blkno >= eofs) {
 468		/*
 469		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
 470		 * but none of the higher level infrastructure supports
 471		 * returning a specific error on buffer lookup failures.
 472		 */
 473		xfs_alert(btp->bt_mount,
 474			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
 475			  __func__, blkno, eofs);
 476		WARN_ON(1);
 477		return NULL;
 478	}
 479
 480	/* get tree root */
 481	pag = xfs_perag_get(btp->bt_mount,
 482				xfs_daddr_to_agno(btp->bt_mount, blkno));
 483
 484	/* walk tree */
 485	spin_lock(&pag->pag_buf_lock);
 486	rbp = &pag->pag_buf_tree.rb_node;
 487	parent = NULL;
 488	bp = NULL;
 489	while (*rbp) {
 490		parent = *rbp;
 491		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
 492
 493		if (blkno < bp->b_bn)
 494			rbp = &(*rbp)->rb_left;
 495		else if (blkno > bp->b_bn)
 496			rbp = &(*rbp)->rb_right;
 497		else {
 498			/*
 499			 * found a block number match. If the range doesn't
 500			 * match, the only way this is allowed is if the buffer
 501			 * in the cache is stale and the transaction that made
 502			 * it stale has not yet committed. i.e. we are
 503			 * reallocating a busy extent. Skip this buffer and
 504			 * continue searching to the right for an exact match.
 505			 */
 506			if (bp->b_length != numblks) {
 507				ASSERT(bp->b_flags & XBF_STALE);
 508				rbp = &(*rbp)->rb_right;
 509				continue;
 510			}
 511			atomic_inc(&bp->b_hold);
 512			goto found;
 513		}
 514	}
 515
 516	/* No match found */
 517	if (new_bp) {
 518		rb_link_node(&new_bp->b_rbnode, parent, rbp);
 519		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
 520		/* the buffer keeps the perag reference until it is freed */
 521		new_bp->b_pag = pag;
 522		spin_unlock(&pag->pag_buf_lock);
 523	} else {
 524		XFS_STATS_INC(xb_miss_locked);
 525		spin_unlock(&pag->pag_buf_lock);
 526		xfs_perag_put(pag);
 527	}
 528	return new_bp;
 529
 530found:
 531	spin_unlock(&pag->pag_buf_lock);
 532	xfs_perag_put(pag);
 533
 534	if (!xfs_buf_trylock(bp)) {
 535		if (flags & XBF_TRYLOCK) {
 536			xfs_buf_rele(bp);
 537			XFS_STATS_INC(xb_busy_locked);
 538			return NULL;
 539		}
 540		xfs_buf_lock(bp);
 541		XFS_STATS_INC(xb_get_locked_waited);
 542	}
 543
 544	/*
 545	 * if the buffer is stale, clear all the external state associated with
 546	 * it. We need to keep flags such as how we allocated the buffer memory
 547	 * intact here.
 548	 */
 549	if (bp->b_flags & XBF_STALE) {
 550		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 551		ASSERT(bp->b_iodone == NULL);
 552		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
 553		bp->b_ops = NULL;
 554	}
 555
 556	trace_xfs_buf_find(bp, flags, _RET_IP_);
 557	XFS_STATS_INC(xb_get_locked);
 558	return bp;
 559}
 560
 561/*
 562 * Assembles a buffer covering the specified range. The code is optimised for
 563 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 564 * more hits than misses.
 565 */
 566struct xfs_buf *
 567xfs_buf_get_map(
 568	struct xfs_buftarg	*target,
 569	struct xfs_buf_map	*map,
 570	int			nmaps,
 571	xfs_buf_flags_t		flags)
 572{
 573	struct xfs_buf		*bp;
 574	struct xfs_buf		*new_bp;
 575	int			error = 0;
 576
 577	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
 578	if (likely(bp))
 579		goto found;
 580
 581	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
 582	if (unlikely(!new_bp))
 583		return NULL;
 584
 585	error = xfs_buf_allocate_memory(new_bp, flags);
 586	if (error) {
 587		xfs_buf_free(new_bp);
 588		return NULL;
 589	}
 590
 591	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
 592	if (!bp) {
 593		xfs_buf_free(new_bp);
 594		return NULL;
 595	}
 596
 597	if (bp != new_bp)
 598		xfs_buf_free(new_bp);
 599
 600found:
 601	if (!bp->b_addr) {
 602		error = _xfs_buf_map_pages(bp, flags);
 603		if (unlikely(error)) {
 604			xfs_warn(target->bt_mount,
 605				"%s: failed to map pages\n", __func__);
 606			xfs_buf_relse(bp);
 607			return NULL;
 608		}
 609	}
 610
 611	XFS_STATS_INC(xb_get);
 612	trace_xfs_buf_get(bp, flags, _RET_IP_);
 613	return bp;
 614}
 615
 616STATIC int
 617_xfs_buf_read(
 618	xfs_buf_t		*bp,
 619	xfs_buf_flags_t		flags)
 620{
 621	ASSERT(!(flags & XBF_WRITE));
 622	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 623
 624	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 625	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 626
 627	xfs_buf_iorequest(bp);
 628	if (flags & XBF_ASYNC)
 629		return 0;
 630	return xfs_buf_iowait(bp);
 
 631}
 632
 633xfs_buf_t *
 634xfs_buf_read_map(
 635	struct xfs_buftarg	*target,
 636	struct xfs_buf_map	*map,
 637	int			nmaps,
 638	xfs_buf_flags_t		flags,
 639	const struct xfs_buf_ops *ops)
 640{
 641	struct xfs_buf		*bp;
 642
 643	flags |= XBF_READ;
 644
 645	bp = xfs_buf_get_map(target, map, nmaps, flags);
 646	if (bp) {
 647		trace_xfs_buf_read(bp, flags, _RET_IP_);
 648
 649		if (!XFS_BUF_ISDONE(bp)) {
 650			XFS_STATS_INC(xb_get_read);
 651			bp->b_ops = ops;
 652			_xfs_buf_read(bp, flags);
 653		} else if (flags & XBF_ASYNC) {
 654			/*
 655			 * Read ahead call which is already satisfied,
 656			 * drop the buffer
 657			 */
 658			xfs_buf_relse(bp);
 659			return NULL;
 660		} else {
 661			/* We do not want read in the flags */
 662			bp->b_flags &= ~XBF_READ;
 663		}
 664	}
 665
 666	return bp;
 667}
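/*
 * Editorial sketch (not part of the original source): a minimal synchronous
 * read through the interface above for a single-extent buffer.  The target,
 * block number, length and verifier ops are assumed to be supplied by the
 * caller; error handling follows the positive errno convention used in this
 * file.
 */
#if 0	/* illustrative, not compiled */
static int example_read_block(struct xfs_buftarg *target, xfs_daddr_t blkno,
			      size_t numblks, const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	struct xfs_buf	*bp;
	int		error;

	bp = xfs_buf_read_map(target, &map, 1, 0, ops);
	if (!bp)
		return ENOMEM;
	error = bp->b_error;
	if (!error) {
		/* ... use bp->b_addr ... */
	}
	xfs_buf_relse(bp);		/* unlock and drop the reference */
	return error;
}
#endif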
 668
 669/*
 670 *	If we are not low on memory then do the readahead in a deadlock
 671 *	safe manner.
 672 */
 673void
 674xfs_buf_readahead_map(
 675	struct xfs_buftarg	*target,
 676	struct xfs_buf_map	*map,
 677	int			nmaps,
 678	const struct xfs_buf_ops *ops)
 679{
 680	if (bdi_read_congested(target->bt_bdi))
 681		return;
 682
 683	xfs_buf_read_map(target, map, nmaps,
 684		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
 685}
 686
 687/*
 688 * Read an uncached buffer from disk. Allocates and returns a locked
 689 * buffer containing the disk contents or nothing.
 690 */
 691struct xfs_buf *
 692xfs_buf_read_uncached(
 693	struct xfs_buftarg	*target,
 694	xfs_daddr_t		daddr,
 695	size_t			numblks,
 696	int			flags,
 
 697	const struct xfs_buf_ops *ops)
 698{
 699	struct xfs_buf		*bp;
 700
 
 
 701	bp = xfs_buf_get_uncached(target, numblks, flags);
 702	if (!bp)
 703		return NULL;
 704
 705	/* set up the buffer for a read IO */
 706	ASSERT(bp->b_map_count == 1);
 707	bp->b_bn = daddr;
 708	bp->b_maps[0].bm_bn = daddr;
 709	bp->b_flags |= XBF_READ;
 710	bp->b_ops = ops;
 711
 712	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
 
 
 713		xfs_buf_relse(bp);
 714		return NULL;
 715	}
 716	xfs_buf_iorequest(bp);
 717	xfs_buf_iowait(bp);
 718	return bp;
 719}
 720
 721/*
 722 * Return a buffer allocated as an empty buffer and associated to external
 723 * memory via xfs_buf_associate_memory() back to its empty state.
 724 */
 725void
 726xfs_buf_set_empty(
 727	struct xfs_buf		*bp,
 728	size_t			numblks)
 729{
 730	if (bp->b_pages)
 731		_xfs_buf_free_pages(bp);
 732
 733	bp->b_pages = NULL;
 734	bp->b_page_count = 0;
 735	bp->b_addr = NULL;
 736	bp->b_length = numblks;
 737	bp->b_io_length = numblks;
 738
 739	ASSERT(bp->b_map_count == 1);
 740	bp->b_bn = XFS_BUF_DADDR_NULL;
 741	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
 742	bp->b_maps[0].bm_len = bp->b_length;
 743}
 744
 745static inline struct page *
 746mem_to_page(
 747	void			*addr)
 748{
 749	if ((!is_vmalloc_addr(addr))) {
 750		return virt_to_page(addr);
 751	} else {
 752		return vmalloc_to_page(addr);
 753	}
 754}
 755
 756int
 757xfs_buf_associate_memory(
 758	xfs_buf_t		*bp,
 759	void			*mem,
 760	size_t			len)
 761{
 762	int			rval;
 763	int			i = 0;
 764	unsigned long		pageaddr;
 765	unsigned long		offset;
 766	size_t			buflen;
 767	int			page_count;
 768
 769	pageaddr = (unsigned long)mem & PAGE_MASK;
 770	offset = (unsigned long)mem - pageaddr;
 771	buflen = PAGE_ALIGN(len + offset);
 772	page_count = buflen >> PAGE_SHIFT;
 773
 774	/* Free any previous set of page pointers */
 775	if (bp->b_pages)
 776		_xfs_buf_free_pages(bp);
 777
 778	bp->b_pages = NULL;
 779	bp->b_addr = mem;
 780
 781	rval = _xfs_buf_get_pages(bp, page_count, 0);
 782	if (rval)
 783		return rval;
 784
 785	bp->b_offset = offset;
 786
 787	for (i = 0; i < bp->b_page_count; i++) {
 788		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 789		pageaddr += PAGE_SIZE;
 790	}
 791
 792	bp->b_io_length = BTOBB(len);
 793	bp->b_length = BTOBB(buflen);
 794
 795	return 0;
 796}
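/*
 * Editorial note (not part of the original source): a worked example of the
 * page arithmetic above, assuming PAGE_SIZE == 4096 and a hypothetical
 * buffer at mem ending in ...1100 (page offset 0x100) with len == 6000:
 *
 *	pageaddr   = mem & PAGE_MASK         = ...1000
 *	offset     = mem - pageaddr          = 256
 *	buflen     = PAGE_ALIGN(6000 + 256)  = 8192
 *	page_count = buflen >> PAGE_SHIFT    = 2
 *
 * so the external memory is described by two page pointers plus a 256 byte
 * offset into the first page.
 */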
 797
 798xfs_buf_t *
 799xfs_buf_get_uncached(
 800	struct xfs_buftarg	*target,
 801	size_t			numblks,
 802	int			flags)
 803{
 804	unsigned long		page_count;
 805	int			error, i;
 806	struct xfs_buf		*bp;
 807	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 808
 809	bp = _xfs_buf_alloc(target, &map, 1, 0);
 
 810	if (unlikely(bp == NULL))
 811		goto fail;
 812
 813	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
 814	error = _xfs_buf_get_pages(bp, page_count, 0);
 815	if (error)
 816		goto fail_free_buf;
 817
 818	for (i = 0; i < page_count; i++) {
 819		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 820		if (!bp->b_pages[i])
 821			goto fail_free_mem;
 822	}
 823	bp->b_flags |= _XBF_PAGES;
 824
 825	error = _xfs_buf_map_pages(bp, 0);
 826	if (unlikely(error)) {
 827		xfs_warn(target->bt_mount,
 828			"%s: failed to map pages", __func__);
 829		goto fail_free_mem;
 830	}
 831
 832	trace_xfs_buf_get_uncached(bp, _RET_IP_);
 833	return bp;
 834
 835 fail_free_mem:
 836	while (--i >= 0)
 837		__free_page(bp->b_pages[i]);
 838	_xfs_buf_free_pages(bp);
 839 fail_free_buf:
 840	xfs_buf_free_maps(bp);
 841	kmem_zone_free(xfs_buf_zone, bp);
 842 fail:
 843	return NULL;
 844}
 845
 846/*
 847 *	Increment reference count on buffer, to hold the buffer concurrently
 848 *	with another thread which may release (free) the buffer asynchronously.
 849 *	Must hold the buffer already to call this function.
 850 */
 851void
 852xfs_buf_hold(
 853	xfs_buf_t		*bp)
 854{
 855	trace_xfs_buf_hold(bp, _RET_IP_);
 856	atomic_inc(&bp->b_hold);
 857}
 858
 859/*
 860 *	Releases a hold on the specified buffer.  If the
 861 *	hold count is 1, calls xfs_buf_free.
 862 */
 863void
 864xfs_buf_rele(
 865	xfs_buf_t		*bp)
 866{
 867	struct xfs_perag	*pag = bp->b_pag;
 
 
 868
 869	trace_xfs_buf_rele(bp, _RET_IP_);
 870
 871	if (!pag) {
 872		ASSERT(list_empty(&bp->b_lru));
 873		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 874		if (atomic_dec_and_test(&bp->b_hold))
 875			xfs_buf_free(bp);
 
 876		return;
 877	}
 878
 879	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
 880
 881	ASSERT(atomic_read(&bp->b_hold) > 0);
 882	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
 883		spin_lock(&bp->b_lock);
 884		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 885			/*
 886			 * If the buffer is added to the LRU take a new
 887			 * reference to the buffer for the LRU and clear the
 888			 * (now stale) dispose list state flag
 889			 */
 890			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
 891				bp->b_state &= ~XFS_BSTATE_DISPOSE;
 892				atomic_inc(&bp->b_hold);
 893			}
 894			spin_unlock(&bp->b_lock);
 895			spin_unlock(&pag->pag_buf_lock);
 896		} else {
 897			/*
 898			 * most of the time buffers will already be removed from
 899			 * the LRU, so optimise that case by checking for the
 900			 * XFS_BSTATE_DISPOSE flag indicating the last list the
 901			 * buffer was on was the disposal list
 902			 */
 903			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
 904				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
 905			} else {
 906				ASSERT(list_empty(&bp->b_lru));
 907			}
 908			spin_unlock(&bp->b_lock);
 909
 910			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
 911			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 912			spin_unlock(&pag->pag_buf_lock);
 913			xfs_perag_put(pag);
 914			xfs_buf_free(bp);
 915		}
 916	}
 917}
 918
 919
 920/*
 921 *	Lock a buffer object, if it is not already locked.
 922 *
 923 *	If we come across a stale, pinned, locked buffer, we know that we are
 924 *	being asked to lock a buffer that has been reallocated. Because it is
 925 *	pinned, we know that the log has not been pushed to disk and hence it
 926 *	will still be locked.  Rather than continuing to have trylock attempts
 927 *	fail until someone else pushes the log, push it ourselves before
 928 *	returning.  This means that the xfsaild will not get stuck trying
 929 *	to push on stale inode buffers.
 930 */
 931int
 932xfs_buf_trylock(
 933	struct xfs_buf		*bp)
 934{
 935	int			locked;
 936
 937	locked = down_trylock(&bp->b_sema) == 0;
 938	if (locked)
 939		XB_SET_OWNER(bp);
 940
 941	trace_xfs_buf_trylock(bp, _RET_IP_);
 
 
 942	return locked;
 943}
 944
 945/*
 946 *	Lock a buffer object.
 947 *
 948 *	If we come across a stale, pinned, locked buffer, we know that we
 949 *	are being asked to lock a buffer that has been reallocated. Because
 950 *	it is pinned, we know that the log has not been pushed to disk and
 951 *	hence it will still be locked. Rather than sleeping until someone
 952 *	else pushes the log, push it ourselves before trying to get the lock.
 953 */
 954void
 955xfs_buf_lock(
 956	struct xfs_buf		*bp)
 957{
 958	trace_xfs_buf_lock(bp, _RET_IP_);
 959
 960	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 961		xfs_log_force(bp->b_target->bt_mount, 0);
 962	down(&bp->b_sema);
 963	XB_SET_OWNER(bp);
 964
 965	trace_xfs_buf_lock_done(bp, _RET_IP_);
 966}
 967
 968void
 969xfs_buf_unlock(
 970	struct xfs_buf		*bp)
 971{
 
 
 972	XB_CLEAR_OWNER(bp);
 973	up(&bp->b_sema);
 974
 975	trace_xfs_buf_unlock(bp, _RET_IP_);
 976}
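/*
 * Editorial sketch (not part of the original source): the locking entry
 * points above in a typical "try first, then decide whether to block" shape.
 * Real callers usually encode this policy through XBF_TRYLOCK instead; the
 * function below is illustrative only.
 */
#if 0	/* illustrative, not compiled */
static void example_lock_buffer(struct xfs_buf *bp, bool can_sleep)
{
	if (!xfs_buf_trylock(bp)) {
		if (!can_sleep)
			return;		/* caller backs off */
		xfs_buf_lock(bp);	/* may push the log for stale pinned buffers */
	}

	/* ... work on the locked buffer ... */
	xfs_buf_unlock(bp);
}
#endif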
 977
 978STATIC void
 979xfs_buf_wait_unpin(
 980	xfs_buf_t		*bp)
 981{
 982	DECLARE_WAITQUEUE	(wait, current);
 983
 984	if (atomic_read(&bp->b_pin_count) == 0)
 985		return;
 986
 987	add_wait_queue(&bp->b_waiters, &wait);
 988	for (;;) {
 989		set_current_state(TASK_UNINTERRUPTIBLE);
 990		if (atomic_read(&bp->b_pin_count) == 0)
 991			break;
 992		io_schedule();
 993	}
 994	remove_wait_queue(&bp->b_waiters, &wait);
 995	set_current_state(TASK_RUNNING);
 996}
 997
 998/*
 999 *	Buffer Utility Routines
1000 */
1001
1002STATIC void
1003xfs_buf_iodone_work(
1004	struct work_struct	*work)
1005{
1006	struct xfs_buf		*bp =
1007		container_of(work, xfs_buf_t, b_iodone_work);
1008	bool			read = !!(bp->b_flags & XBF_READ);
1009
1010	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1011
1012	/* only validate buffers that were read without errors */
1013	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
1014		bp->b_ops->verify_read(bp);
1015
1016	if (bp->b_iodone)
1017		(*(bp->b_iodone))(bp);
1018	else if (bp->b_flags & XBF_ASYNC)
1019		xfs_buf_relse(bp);
1020	else {
1021		ASSERT(read && bp->b_ops);
1022		complete(&bp->b_iowait);
1023	}
1024}
1025
1026void
1027xfs_buf_ioend(
1028	struct xfs_buf	*bp,
1029	int		schedule)
1030{
1031	bool		read = !!(bp->b_flags & XBF_READ);
 
1032
1033	trace_xfs_buf_iodone(bp, _RET_IP_);
 
1034
1035	if (bp->b_error == 0)
1036		bp->b_flags |= XBF_DONE;
1037
1038	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
1039		if (schedule) {
1040			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1041			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1042		} else {
1043			xfs_buf_iodone_work(&bp->b_iodone_work);
1044		}
1045	} else {
1046		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1047		complete(&bp->b_iowait);
1048	}
1049}
1050
1051void
1052xfs_buf_ioerror(
1053	xfs_buf_t		*bp,
1054	int			error)
 
1055{
1056	ASSERT(error >= 0 && error <= 0xffff);
1057	bp->b_error = (unsigned short)error;
1058	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1059}
1060
1061void
1062xfs_buf_ioerror_alert(
1063	struct xfs_buf		*bp,
1064	const char		*func)
1065{
1066	xfs_alert(bp->b_target->bt_mount,
1067"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1068		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
1069}
1070
1071/*
1072 * Called when we want to stop a buffer from getting written or read.
1073 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1074 * so that the proper iodone callbacks get called.
1075 */
1076STATIC int
1077xfs_bioerror(
1078	xfs_buf_t *bp)
1079{
1080#ifdef XFSERRORDEBUG
1081	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1082#endif
1083
1084	/*
1085	 * No need to wait until the buffer is unpinned, we aren't flushing it.
1086	 */
1087	xfs_buf_ioerror(bp, EIO);
1088
1089	/*
1090	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1091	 */
1092	XFS_BUF_UNREAD(bp);
1093	XFS_BUF_UNDONE(bp);
1094	xfs_buf_stale(bp);
1095
1096	xfs_buf_ioend(bp, 0);
1097
1098	return EIO;
1099}
1100
1101/*
1102 * Same as xfs_bioerror, except that we are releasing the buffer
1103 * here ourselves, and avoiding the xfs_buf_ioend call.
1104 * This is meant for userdata errors; metadata bufs come with
1105 * iodone functions attached, so that we can track down errors.
1106 */
1107int
1108xfs_bioerror_relse(
1109	struct xfs_buf	*bp)
1110{
1111	int64_t		fl = bp->b_flags;
1112	/*
1113	 * No need to wait until the buffer is unpinned.
1114	 * We aren't flushing it.
1115	 *
1116	 * chunkhold expects B_DONE to be set, whether
1117	 * we actually finish the I/O or not. We don't want to
1118	 * change that interface.
1119	 */
1120	XFS_BUF_UNREAD(bp);
1121	XFS_BUF_DONE(bp);
1122	xfs_buf_stale(bp);
1123	bp->b_iodone = NULL;
1124	if (!(fl & XBF_ASYNC)) {
1125		/*
1126		 * Mark b_error and B_ERROR _both_.
1127		 * Lots of chunkcache code assumes that.
1128		 * There's no reason to mark error for
1129		 * ASYNC buffers.
1130		 */
1131		xfs_buf_ioerror(bp, EIO);
1132		complete(&bp->b_iowait);
1133	} else {
1134		xfs_buf_relse(bp);
1135	}
1136
1137	return EIO;
1138}
1139
1140STATIC int
1141xfs_bdstrat_cb(
1142	struct xfs_buf	*bp)
1143{
1144	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1145		trace_xfs_bdstrat_shut(bp, _RET_IP_);
1146		/*
1147		 * Metadata write that didn't get logged but
1148		 * written delayed anyway. These aren't associated
1149		 * with a transaction, and can be ignored.
1150		 */
1151		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1152			return xfs_bioerror_relse(bp);
1153		else
1154			return xfs_bioerror(bp);
1155	}
1156
1157	xfs_buf_iorequest(bp);
1158	return 0;
1159}
1160
1161int
1162xfs_bwrite(
1163	struct xfs_buf		*bp)
1164{
1165	int			error;
1166
1167	ASSERT(xfs_buf_islocked(bp));
1168
1169	bp->b_flags |= XBF_WRITE;
1170	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
1171
1172	xfs_bdstrat_cb(bp);
1173
1174	error = xfs_buf_iowait(bp);
1175	if (error) {
1176		xfs_force_shutdown(bp->b_target->bt_mount,
1177				   SHUTDOWN_META_IO_ERROR);
1178	}
1179	return error;
1180}
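/*
 * Editorial sketch (not part of the original source): a typical synchronous
 * write through xfs_bwrite().  Assuming a plain buffer with no b_iodone
 * handler, the buffer is still locked when xfs_bwrite() returns, so the
 * caller unlocks and drops its hold with xfs_buf_relse().
 */
#if 0	/* illustrative, not compiled */
static int example_sync_write(struct xfs_buf *bp)
{
	int	error;

	xfs_buf_lock(bp);		/* xfs_bwrite() asserts the lock is held */
	error = xfs_bwrite(bp);		/* submits the write and waits for it */
	xfs_buf_relse(bp);		/* unlock and drop the caller's hold */
	return error;
}
#endif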
1181
1182STATIC void
1183_xfs_buf_ioend(
1184	xfs_buf_t		*bp,
1185	int			schedule)
1186{
1187	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1188		xfs_buf_ioend(bp, schedule);
1189}
1190
1191STATIC void
1192xfs_buf_bio_end_io(
1193	struct bio		*bio,
1194	int			error)
1195{
1196	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
1197
1198	/*
1199	 * don't overwrite existing errors - otherwise we can lose errors on
1200	 * buffers that require multiple bios to complete.
1201	 */
1202	if (!bp->b_error)
1203		xfs_buf_ioerror(bp, -error);
1204
1205	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1206		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1207
1208	_xfs_buf_ioend(bp, 1);
 
1209	bio_put(bio);
1210}
1211
1212static void
1213xfs_buf_ioapply_map(
1214	struct xfs_buf	*bp,
1215	int		map,
1216	int		*buf_offset,
1217	int		*count,
1218	int		rw)
 
1219{
1220	int		page_index;
1221	int		total_nr_pages = bp->b_page_count;
1222	int		nr_pages;
1223	struct bio	*bio;
1224	sector_t	sector =  bp->b_maps[map].bm_bn;
1225	int		size;
1226	int		offset;
1227
1228	total_nr_pages = bp->b_page_count;
1229
1230	/* skip the pages in the buffer before the start offset */
1231	page_index = 0;
1232	offset = *buf_offset;
1233	while (offset >= PAGE_SIZE) {
1234		page_index++;
1235		offset -= PAGE_SIZE;
1236	}
1237
1238	/*
1239	 * Limit the IO size to the length of the current vector, and update the
1240	 * remaining IO count for the next time around.
1241	 */
1242	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1243	*count -= size;
1244	*buf_offset += size;
1245
1246next_chunk:
1247	atomic_inc(&bp->b_io_remaining);
1248	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1249	if (nr_pages > total_nr_pages)
1250		nr_pages = total_nr_pages;
1251
1252	bio = bio_alloc(GFP_NOIO, nr_pages);
1253	bio->bi_bdev = bp->b_target->bt_bdev;
1254	bio->bi_iter.bi_sector = sector;
1255	bio->bi_end_io = xfs_buf_bio_end_io;
1256	bio->bi_private = bp;
1257
1258
1259	for (; size && nr_pages; nr_pages--, page_index++) {
1260		int	rbytes, nbytes = PAGE_SIZE - offset;
1261
1262		if (nbytes > size)
1263			nbytes = size;
1264
1265		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1266				      offset);
1267		if (rbytes < nbytes)
1268			break;
1269
1270		offset = 0;
1271		sector += BTOBB(nbytes);
1272		size -= nbytes;
1273		total_nr_pages--;
1274	}
1275
1276	if (likely(bio->bi_iter.bi_size)) {
1277		if (xfs_buf_is_vmapped(bp)) {
1278			flush_kernel_vmap_range(bp->b_addr,
1279						xfs_buf_vmap_len(bp));
1280		}
1281		submit_bio(rw, bio);
1282		if (size)
1283			goto next_chunk;
1284	} else {
1285		/*
1286		 * This is guaranteed not to be the last io reference count
1287		 * because the caller (xfs_buf_iorequest) holds a count itself.
1288		 */
1289		atomic_dec(&bp->b_io_remaining);
1290		xfs_buf_ioerror(bp, EIO);
1291		bio_put(bio);
1292	}
1293
1294}
1295
1296STATIC void
1297_xfs_buf_ioapply(
1298	struct xfs_buf	*bp)
1299{
1300	struct blk_plug	plug;
1301	int		rw;
 
1302	int		offset;
1303	int		size;
1304	int		i;
1305
1306	/*
1307	 * Make sure we capture only current IO errors rather than stale errors
1308	 * left over from previous use of the buffer (e.g. failed readahead).
1309	 */
1310	bp->b_error = 0;
1311
1312	if (bp->b_flags & XBF_WRITE) {
 
1313		if (bp->b_flags & XBF_SYNCIO)
1314			rw = WRITE_SYNC;
1315		else
1316			rw = WRITE;
1317		if (bp->b_flags & XBF_FUA)
1318			rw |= REQ_FUA;
1319		if (bp->b_flags & XBF_FLUSH)
1320			rw |= REQ_FLUSH;
1321
1322		/*
1323		 * Run the write verifier callback function if it exists. If
1324		 * this function fails it will mark the buffer with an error and
1325		 * the IO should not be dispatched.
1326		 */
1327		if (bp->b_ops) {
1328			bp->b_ops->verify_write(bp);
1329			if (bp->b_error) {
1330				xfs_force_shutdown(bp->b_target->bt_mount,
1331						   SHUTDOWN_CORRUPT_INCORE);
1332				return;
1333			}
1334		}
1335	} else if (bp->b_flags & XBF_READ_AHEAD) {
1336		rw = READA;
 
1337	} else {
1338		rw = READ;
1339	}
1340
1341	/* we only use the buffer cache for meta-data */
1342	rw |= REQ_META;
1343
1344	/*
1345	 * Walk all the vectors issuing IO on them. Set up the initial offset
1346	 * into the buffer and the desired IO size before we start -
1347	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
1348	 * subsequent call.
1349	 */
1350	offset = bp->b_offset;
1351	size = BBTOB(bp->b_io_length);
1352	blk_start_plug(&plug);
1353	for (i = 0; i < bp->b_map_count; i++) {
1354		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1355		if (bp->b_error)
1356			break;
1357		if (size <= 0)
1358			break;	/* all done */
1359	}
1360	blk_finish_plug(&plug);
1361}
1362
1363void
1364xfs_buf_iorequest(
1365	xfs_buf_t		*bp)
1366{
1367	trace_xfs_buf_iorequest(bp, _RET_IP_);
1368
1369	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1370
1371	if (bp->b_flags & XBF_WRITE)
1372		xfs_buf_wait_unpin(bp);
1373	xfs_buf_hold(bp);
1374
1375	/*
1376	 * Set the count to 1 initially, this will stop an I/O
1377	 * completion callout which happens before we have started
1378	 * all the I/O from calling xfs_buf_ioend too early.
1379	 */
1380	atomic_set(&bp->b_io_remaining, 1);
 
1381	_xfs_buf_ioapply(bp);
 
1382	/*
1383	 * If _xfs_buf_ioapply failed, we'll get back here with
1384	 * only the reference we took above.  _xfs_buf_ioend will
1385	 * drop it to zero, so we'd better not queue it for later,
1386	 * or we'll free it before it's done.
1387	 */
1388	_xfs_buf_ioend(bp, bp->b_error ? 0 : 1);
1389
1390	xfs_buf_rele(bp);
 
1391}
1392
1393/*
1394 * Waits for I/O to complete on the buffer supplied.  It returns immediately if
1395 * no I/O is pending or there is already a pending error on the buffer, in which
1396 * case nothing will ever complete.  It returns the I/O error code, if any, or
1397 * 0 if there was no error.
1398 */
1399int
1400xfs_buf_iowait(
1401	xfs_buf_t		*bp)
1402{
1403	trace_xfs_buf_iowait(bp, _RET_IP_);
1404
1405	if (!bp->b_error)
1406		wait_for_completion(&bp->b_iowait);
1407
1408	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1409	return bp->b_error;
1410}
1411
1412xfs_caddr_t
1413xfs_buf_offset(
1414	xfs_buf_t		*bp,
1415	size_t			offset)
1416{
1417	struct page		*page;
1418
1419	if (bp->b_addr)
1420		return bp->b_addr + offset;
1421
1422	offset += bp->b_offset;
1423	page = bp->b_pages[offset >> PAGE_SHIFT];
1424	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1425}
1426
1427/*
1428 *	Move data into or out of a buffer.
1429 */
1430void
1431xfs_buf_iomove(
1432	xfs_buf_t		*bp,	/* buffer to process		*/
1433	size_t			boff,	/* starting buffer offset	*/
1434	size_t			bsize,	/* length to copy		*/
1435	void			*data,	/* data address			*/
1436	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
1437{
1438	size_t			bend;
1439
1440	bend = boff + bsize;
1441	while (boff < bend) {
1442		struct page	*page;
1443		int		page_index, page_offset, csize;
1444
1445		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1446		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1447		page = bp->b_pages[page_index];
1448		csize = min_t(size_t, PAGE_SIZE - page_offset,
1449				      BBTOB(bp->b_io_length) - boff);
1450
1451		ASSERT((csize + page_offset) <= PAGE_SIZE);
1452
1453		switch (mode) {
1454		case XBRW_ZERO:
1455			memset(page_address(page) + page_offset, 0, csize);
1456			break;
1457		case XBRW_READ:
1458			memcpy(data, page_address(page) + page_offset, csize);
1459			break;
1460		case XBRW_WRITE:
1461			memcpy(page_address(page) + page_offset, data, csize);
1462		}
1463
1464		boff += csize;
1465		data += csize;
1466	}
1467}
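/*
 * Editorial sketch (not part of the original source): the three copy modes of
 * the helper above in one place.  The offsets and lengths are hypothetical
 * and assumed to lie within the buffer's I/O range.
 */
#if 0	/* illustrative, not compiled */
static void example_iomove(struct xfs_buf *bp, void *scratch)
{
	xfs_buf_iomove(bp, 0, 128, scratch, XBRW_READ);		/* copy out of the buffer */
	xfs_buf_iomove(bp, 0, 128, scratch, XBRW_WRITE);	/* copy back into the buffer */
	xfs_buf_iomove(bp, 128, 512, scratch, XBRW_ZERO);	/* zero a range; data is not read */
}
#endif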
1468
1469/*
1470 *	Handling of buffer targets (buftargs).
1471 */
1472
1473/*
1474 * Wait for any bufs with callbacks that have been submitted but have not yet
1475 * returned. These buffers will have an elevated hold count, so wait on those
1476 * while freeing all the buffers only held by the LRU.
1477 */
1478static enum lru_status
1479xfs_buftarg_wait_rele(
1480	struct list_head	*item,
 
1481	spinlock_t		*lru_lock,
1482	void			*arg)
1483
1484{
1485	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1486	struct list_head	*dispose = arg;
1487
1488	if (atomic_read(&bp->b_hold) > 1) {
1489		/* need to wait, so skip it this pass */
1490		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1491		return LRU_SKIP;
1492	}
1493	if (!spin_trylock(&bp->b_lock))
1494		return LRU_SKIP;
1495
1496	/*
1497	 * clear the LRU reference count so the buffer doesn't get
1498	 * ignored in xfs_buf_rele().
1499	 */
1500	atomic_set(&bp->b_lru_ref, 0);
1501	bp->b_state |= XFS_BSTATE_DISPOSE;
1502	list_move(item, dispose);
1503	spin_unlock(&bp->b_lock);
1504	return LRU_REMOVED;
1505}
1506
1507void
1508xfs_wait_buftarg(
1509	struct xfs_buftarg	*btp)
1510{
1511	LIST_HEAD(dispose);
1512	int loop = 0;
1513
1514	/* loop until there is nothing left on the lru list. */
1515	while (list_lru_count(&btp->bt_lru)) {
1516		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1517			      &dispose, LONG_MAX);
1518
1519		while (!list_empty(&dispose)) {
1520			struct xfs_buf *bp;
1521			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1522			list_del_init(&bp->b_lru);
1523			if (bp->b_flags & XBF_WRITE_FAIL) {
1524				xfs_alert(btp->bt_mount,
1525"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
1526"Please run xfs_repair to determine the extent of the problem.",
1527					(long long)bp->b_bn);
 
 
1528			}
1529			xfs_buf_rele(bp);
1530		}
1531		if (loop++ != 0)
1532			delay(100);
1533	}
1534}
1535
1536static enum lru_status
1537xfs_buftarg_isolate(
1538	struct list_head	*item,
 
1539	spinlock_t		*lru_lock,
1540	void			*arg)
1541{
1542	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1543	struct list_head	*dispose = arg;
1544
1545	/*
1546	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1547	 * If we fail to get the lock, just skip it.
1548	 */
1549	if (!spin_trylock(&bp->b_lock))
1550		return LRU_SKIP;
1551	/*
1552	 * Decrement the b_lru_ref count unless the value is already
1553	 * zero. If the value is already zero, we need to reclaim the
1554	 * buffer, otherwise it gets another trip through the LRU.
1555	 */
1556	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1557		spin_unlock(&bp->b_lock);
1558		return LRU_ROTATE;
1559	}
1560
1561	bp->b_state |= XFS_BSTATE_DISPOSE;
1562	list_move(item, dispose);
1563	spin_unlock(&bp->b_lock);
1564	return LRU_REMOVED;
1565}
1566
1567static unsigned long
1568xfs_buftarg_shrink_scan(
1569	struct shrinker		*shrink,
1570	struct shrink_control	*sc)
1571{
1572	struct xfs_buftarg	*btp = container_of(shrink,
1573					struct xfs_buftarg, bt_shrinker);
1574	LIST_HEAD(dispose);
1575	unsigned long		freed;
1576	unsigned long		nr_to_scan = sc->nr_to_scan;
1577
1578	freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
1579				       &dispose, &nr_to_scan);
1580
1581	while (!list_empty(&dispose)) {
1582		struct xfs_buf *bp;
1583		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1584		list_del_init(&bp->b_lru);
1585		xfs_buf_rele(bp);
1586	}
1587
1588	return freed;
1589}
1590
1591static unsigned long
1592xfs_buftarg_shrink_count(
1593	struct shrinker		*shrink,
1594	struct shrink_control	*sc)
1595{
1596	struct xfs_buftarg	*btp = container_of(shrink,
1597					struct xfs_buftarg, bt_shrinker);
1598	return list_lru_count_node(&btp->bt_lru, sc->nid);
1599}
1600
1601void
1602xfs_free_buftarg(
1603	struct xfs_mount	*mp,
1604	struct xfs_buftarg	*btp)
1605{
1606	unregister_shrinker(&btp->bt_shrinker);
 
 
1607	list_lru_destroy(&btp->bt_lru);
1608
1609	if (mp->m_flags & XFS_MOUNT_BARRIER)
1610		xfs_blkdev_issue_flush(btp);
1611
1612	kmem_free(btp);
1613}
1614
1615int
1616xfs_setsize_buftarg(
1617	xfs_buftarg_t		*btp,
1618	unsigned int		blocksize,
1619	unsigned int		sectorsize)
1620{
1621	/* Set up metadata sector size info */
1622	btp->bt_meta_sectorsize = sectorsize;
1623	btp->bt_meta_sectormask = sectorsize - 1;
1624
1625	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1626		char name[BDEVNAME_SIZE];
1627
1628		bdevname(btp->bt_bdev, name);
1629
1630		xfs_warn(btp->bt_mount,
1631			"Cannot set_blocksize to %u on device %s",
1632			sectorsize, name);
1633		return EINVAL;
1634	}
1635
1636	/* Set up device logical sector size mask */
1637	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1638	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1639
1640	return 0;
1641}
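/*
 * Editorial note (not part of the original source): what the sector mask set
 * up above is used for.  With a hypothetical 4096 byte metadata sector size:
 *
 *	bt_meta_sectormask = 4096 - 1 = 0xfff
 *	(byte_offset & bt_meta_sectormask) == 0   <=>  the offset is sector aligned
 *
 * which matches the alignment assertion made on lookup in _xfs_buf_find().
 */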
1642
1643/*
1644 * When allocating the initial buffer target we have not yet
1645 * read in the superblock, so we don't know what sized sectors
1646 * are being used at this early stage.  Play safe.
1647 */
1648STATIC int
1649xfs_setsize_buftarg_early(
1650	xfs_buftarg_t		*btp,
1651	struct block_device	*bdev)
1652{
1653	return xfs_setsize_buftarg(btp, PAGE_SIZE,
1654				   bdev_logical_block_size(bdev));
1655}
1656
1657xfs_buftarg_t *
1658xfs_alloc_buftarg(
1659	struct xfs_mount	*mp,
1660	struct block_device	*bdev,
1661	int			external,
1662	const char		*fsname)
1663{
1664	xfs_buftarg_t		*btp;
1665
1666	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1667
1668	btp->bt_mount = mp;
1669	btp->bt_dev =  bdev->bd_dev;
1670	btp->bt_bdev = bdev;
1671	btp->bt_bdi = blk_get_backing_dev_info(bdev);
1672	if (!btp->bt_bdi)
1673		goto error;
1674
1675	if (xfs_setsize_buftarg_early(btp, bdev))
1676		goto error;
1677
1678	if (list_lru_init(&btp->bt_lru))
1679		goto error;
1680
1681	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1682	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1683	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1684	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1685	register_shrinker(&btp->bt_shrinker);
 
1686	return btp;
1687
1688error:
1689	kmem_free(btp);
1690	return NULL;
1691}
1692
1693/*
1694 * Add a buffer to the delayed write list.
1695 *
1696 * This queues a buffer for writeout if it hasn't already been.  Note that
1697 * neither this routine nor the buffer list submission functions perform
1698 * any internal synchronization.  It is expected that the lists are thread-local
1699 * to the callers.
1700 *
1701 * Returns true if we queued up the buffer, or false if it already had
1702 * been on the buffer list.
1703 */
1704bool
1705xfs_buf_delwri_queue(
1706	struct xfs_buf		*bp,
1707	struct list_head	*list)
1708{
1709	ASSERT(xfs_buf_islocked(bp));
1710	ASSERT(!(bp->b_flags & XBF_READ));
1711
1712	/*
1713	 * If the buffer is already marked delwri it already is queued up
1714	 * by someone else for immediate writeout.  Just ignore it in that
1715	 * case.
1716	 */
1717	if (bp->b_flags & _XBF_DELWRI_Q) {
1718		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1719		return false;
1720	}
1721
1722	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1723
1724	/*
1725	 * If a buffer gets written out synchronously or marked stale while it
1726	 * is on a delwri list we lazily remove it. To do this, the other party
1727	 * clears the  _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1728	 * It remains referenced and on the list.  In a rare corner case it
1729	 * might get readded to a delwri list after the synchronous writeout, in
1730	 * which case we just need to re-add the flag here.
1731	 */
1732	bp->b_flags |= _XBF_DELWRI_Q;
1733	if (list_empty(&bp->b_list)) {
1734		atomic_inc(&bp->b_hold);
1735		list_add_tail(&bp->b_list, list);
1736	}
1737
1738	return true;
1739}
1740
1741/*
1742 * Compare function is more complex than it needs to be because
1743 * the return value is only 32 bits and we are doing comparisons
1744 * on 64 bit values
1745 */
1746static int
1747xfs_buf_cmp(
1748	void		*priv,
1749	struct list_head *a,
1750	struct list_head *b)
1751{
1752	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
1753	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
1754	xfs_daddr_t		diff;
1755
1756	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1757	if (diff < 0)
1758		return -1;
1759	if (diff > 0)
1760		return 1;
1761	return 0;
1762}
1763
1764static int
1765__xfs_buf_delwri_submit(
1766	struct list_head	*buffer_list,
1767	struct list_head	*io_list,
1768	bool			wait)
1769{
1770	struct blk_plug		plug;
1771	struct xfs_buf		*bp, *n;
 
1772	int			pinned = 0;
1773
 
1774	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1775		if (!wait) {
1776			if (xfs_buf_ispinned(bp)) {
1777				pinned++;
1778				continue;
1779			}
1780			if (!xfs_buf_trylock(bp))
1781				continue;
1782		} else {
1783			xfs_buf_lock(bp);
1784		}
1785
1786		/*
1787		 * Someone else might have written the buffer synchronously or
1788		 * marked it stale in the meantime.  In that case only the
1789		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1790		 * reference and remove it from the list here.
1791		 */
1792		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1793			list_del_init(&bp->b_list);
1794			xfs_buf_relse(bp);
1795			continue;
1796		}
1797
1798		list_move_tail(&bp->b_list, io_list);
1799		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1800	}
1801
1802	list_sort(NULL, io_list, xfs_buf_cmp);
1803
1804	blk_start_plug(&plug);
1805	list_for_each_entry_safe(bp, n, io_list, b_list) {
1806		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1807		bp->b_flags |= XBF_WRITE;
1808
1809		if (!wait) {
1810			bp->b_flags |= XBF_ASYNC;
1811			list_del_init(&bp->b_list);
1812		}
1813		xfs_bdstrat_cb(bp);
1814	}
1815	blk_finish_plug(&plug);
1816
1817	return pinned;
1818}
1819
1820/*
1821 * Write out a buffer list asynchronously.
1822 *
1823 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1824 * out and not wait for I/O completion on any of the buffers.  This interface
1825 * is only safely useable for callers that can track I/O completion by higher
1826 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1827 * function.
1828 */
1829int
1830xfs_buf_delwri_submit_nowait(
1831	struct list_head	*buffer_list)
1832{
1833	LIST_HEAD		(io_list);
1834	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1835}
1836
1837/*
1838 * Write out a buffer list synchronously.
1839 *
1840 * This will take the @buffer_list, write all buffers out and wait for I/O
1841 * completion on all of the buffers. @buffer_list is consumed by the function,
1842 * so callers must have some other way of tracking buffers if they require such
1843 * functionality.
1844 */
1845int
1846xfs_buf_delwri_submit(
1847	struct list_head	*buffer_list)
1848{
1849	LIST_HEAD		(io_list);
1850	int			error = 0, error2;
1851	struct xfs_buf		*bp;
1852
1853	__xfs_buf_delwri_submit(buffer_list, &io_list, true);
1854
1855	/* Wait for IO to complete. */
1856	while (!list_empty(&io_list)) {
1857		bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1858
1859		list_del_init(&bp->b_list);
1860		error2 = xfs_buf_iowait(bp);
1861		xfs_buf_relse(bp);
1862		if (!error)
1863			error = error2;
1864	}
1865
1866	return error;
1867}
1868
1869int __init
1870xfs_buf_init(void)
1871{
1872	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1873						KM_ZONE_HWALIGN, NULL);
1874	if (!xfs_buf_zone)
1875		goto out;
1876
1877	xfslogd_workqueue = alloc_workqueue("xfslogd",
1878					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1879	if (!xfslogd_workqueue)
1880		goto out_free_buf_zone;
1881
1882	return 0;
1883
1884 out_free_buf_zone:
1885	kmem_zone_destroy(xfs_buf_zone);
1886 out:
1887	return -ENOMEM;
1888}
1889
1890void
1891xfs_buf_terminate(void)
1892{
1893	destroy_workqueue(xfslogd_workqueue);
1894	kmem_zone_destroy(xfs_buf_zone);
1895}