v4.17
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}
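/*
 * Illustrative note (added commentary, not original source): the zeroed
 * t_curr_res interacts with xlog_cil_insert_items() below. On the first
 * transaction commit into a new checkpoint context the context ticket
 * still has t_curr_res == 0, so the full unit reservation is stolen from
 * the committing transaction's ticket, roughly:
 *
 *	if (ctx->ticket->t_curr_res == 0) {
 *		ctx_res = ctx->ticket->t_unit_res;
 *		ctx->ticket->t_curr_res = ctx_res;
 *		tp->t_ticket->t_curr_res -= ctx_res;
 *	}
 *
 * Later commits into the same context see a non-zero t_curr_res and only
 * pay for the space their own log vectors consume.
 */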
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
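/*
 * Worked example (illustrative only; the struct sizes are assumptions for
 * a 64-bit build, not taken from this file): if sizeof(struct xfs_log_vec)
 * were 48 and sizeof(struct xfs_log_iovec) were 16, a call with
 * niovecs = 3 would yield round_up(48 + 3 * 16, 8) = 96 bytes reserved
 * for the vector header plus iovec array, so the data region that follows
 * starts 64-bit aligned. Only the rounding behaviour matters here.
 */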

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item_desc *lidp;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned.  We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}

}
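/*
 * Illustrative layout sketch (added commentary, derived from the code
 * above): a shadow buffer is a single contiguous allocation laid out as
 *
 *	[ struct xfs_log_vec | iovec array | 64-bit aligned data region ]
 *	^lv                   ^lv_iovecp    ^lv_buf
 *
 * with lv_buf placed xlog_cil_iovec_space(niovecs) bytes past the start
 * of the allocation, which is why that helper rounds its result up to
 * sizeof(uint64_t).
 */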

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
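/*
 * Worked accounting example (illustrative; the byte value is made up):
 * an item relogged into the same checkpoint with old_lv->lv_bytes = 176
 * and a freshly formatted lv->lv_bytes = 176 contributes a net
 * *diff_len of 176 - 176 = 0, so a repeatedly modified item consumes
 * CIL space only once per checkpoint. Only the add/subtract pattern
 * above is real.
 */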

/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;


	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers  */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
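	/*
	 * Worked example (illustrative figures, not from this file): with a
	 * 32kB iclog and a 512 byte iclog header, iclog_space is 32256
	 * bytes. If the CIL has used 30000 bytes and this commit adds
	 * len = 5000, the checkpoint crosses an iclog boundary, so
	 * split_res = (5000 + 32255) / 32256 = 1 extra record header plus
	 * one op header is charged to the context ticket.
	 */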
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.  Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
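/*
 * Design note (added commentary, hedged): __blkdev_issue_discard() chains
 * each discard onto the bio passed by reference, so after the loop a single
 * completion (xlog_discard_endio) fires once the whole chain has finished.
 * If no discard was ever issued (bio == NULL), the completion work is run
 * synchronously instead, so the context is still freed exactly once.
 */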

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	/*
	 * If we are aborting the commit, wake up anyone waiting on the
	 * committing list.  If we don't, then during a shutdown we can leave
	 * processes waiting in xlog_cil_force_lsn() on a sequence commit that
	 * will never happen because we aborted it.
	 */
	spin_lock(&ctx->cil->xc_push_lock);
	if (abort)
		wake_up_all(&ctx->cil->xc_commit_wait);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}


	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);

}
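/*
 * Worked example (illustrative; assumes the v4.17-era definition of
 * XLOG_CIL_SPACE_LIMIT(log) as one eighth of the log size): on a 512MB
 * log the background push threshold would be 64MB, so a CIL context
 * accumulating dirty metadata is pushed long before it approaches the
 * hard "half the log" checkpoint limit described above.
 */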

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	xfs_lsn_t		xc_commit_lsn;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, xc_commit_lsn, false);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}
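/*
 * Typical call path (added commentary, hedged): in this kernel version
 * xfs_log_commit_cil() is reached from the transaction commit path,
 * roughly:
 *
 *	xfs_trans_commit(tp)
 *	  -> __xfs_trans_commit(tp, regrant)
 *	       -> xfs_log_commit_cil(mp, tp, &commit_lsn, regrant)
 *
 * so the CIL insertion above is what makes delayed logging transparent to
 * ordinary transaction callers.
 */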

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant.  If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}

v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}
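/*
 * Worked example (illustrative figures): with 32kB iclogs, a 512 byte
 * iclog header and a blocking space limit of, say, 8MB, xc_iclog_hdrs is
 * set to 8388608 / (32768 - 512) = 260 - an estimate of how many iclog
 * headers a full CIL checkpoint could consume. The limit value is an
 * assumption here; XLOG_CIL_BLOCKING_SPACE_LIMIT() is derived from the
 * log size in xfs_log_priv.h.
 */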

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents.extent_list);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts.  Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask.  However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count.  cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		struct xlog_cil_pcp	*cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		count += xchg(&cilpcp->space_used, 0);
	}
	atomic_add(count, &ctx->space_used);
}
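/*
 * Added commentary (hedged): the xchg() above atomically takes each cpu's
 * space_used while zeroing it, so a racing transaction commit on that cpu
 * either had its bytes captured here or, because XLOG_CIL_PCP_SPACE is now
 * clear, adds them directly to ctx->space_used - no bytes are counted
 * twice or lost across the soft-limit switch.
 */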
 179
 180static void
 181xlog_cil_ctx_switch(
 182	struct xfs_cil		*cil,
 183	struct xfs_cil_ctx	*ctx)
 184{
 185	xlog_cil_set_iclog_hdr_count(cil);
 186	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
 187	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
 188	ctx->sequence = ++cil->xc_current_sequence;
 189	ctx->cil = cil;
 190	cil->xc_ctx = ctx;
 191}
 192
 193/*
 194 * After the first stage of log recovery is done, we know where the head and
 195 * tail of the log are. We need this log initialisation done before we can
 196 * initialise the first CIL checkpoint context.
 197 *
 198 * Here we allocate a log ticket to track space usage during a CIL push.  This
 199 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 200 * space by failing to account for space used by log headers and additional
 201 * region headers for split regions.
 202 */
 203void
 204xlog_cil_init_post_recovery(
 205	struct xlog	*log)
 206{
 207	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
 208	log->l_cilp->xc_ctx->sequence = 1;
 209	xlog_cil_set_iclog_hdr_count(log->l_cilp);
 210}
 211
 212static inline int
 213xlog_cil_iovec_space(
 214	uint	niovecs)
 215{
 216	return round_up((sizeof(struct xfs_log_vec) +
 217					niovecs * sizeof(struct xfs_log_iovec)),
 218			sizeof(uint64_t));
 219}
 220
 221/*
 222 * Allocate or pin log vector buffers for CIL insertion.
 223 *
 224 * The CIL currently uses disposable buffers for copying a snapshot of the
 225 * modified items into the log during a push. The biggest problem with this is
 226 * the requirement to allocate the disposable buffer during the commit if:
 227 *	a) does not exist; or
 228 *	b) it is too small
 229 *
 230 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 231 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 232 * the memory allocation. This means that we have a potential deadlock situation
 233 * under low memory conditions when we have lots of dirty metadata pinned in
 234 * the CIL and we need a CIL commit to occur to free memory.
 235 *
 236 * To avoid this, we need to move the memory allocation outside the
 237 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 238 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 239 * vector buffers between the check and the formatting of the item into the
 240 * log vector buffer within the xc_ctx_lock.
 241 *
 242 * Because the log vector buffer needs to be unchanged during the CIL push
 243 * process, we cannot share the buffer between the transaction commit (which
 244 * modifies the buffer) and the CIL push context that is writing the changes
 245 * into the log. This means skipping preallocation of buffer space is
 246 * unreliable, but we most definitely do not want to be allocating and freeing
 247 * buffers unnecessarily during commits when overwrites can be done safely.
 248 *
 249 * The simplest solution to this problem is to allocate a shadow buffer when a
 250 * log item is committed for the second time, and then to only use this buffer
 251 * if necessary. The buffer can remain attached to the log item until such time
 252 * it is needed, and this is the buffer that is reallocated to match the size of
 253 * the incoming modification. Then during the formatting of the item we can swap
 254 * the active buffer with the new one if we can't reuse the existing buffer. We
 255 * don't free the old buffer as it may be reused on the next modification if
 256 * it's size is right, otherwise we'll free and reallocate it at that point.
 257 *
 258 * This function builds a vector for the changes in each log item in the
 259 * transaction. It then works out the length of the buffer needed for each log
 260 * item, allocates them and attaches the vector to the log item in preparation
 261 * for the formatting step which occurs under the xc_ctx_lock.
 262 *
 263 * While this means the memory footprint goes up, it avoids the repeated
 264 * alloc/free pattern that repeated modifications of an item would otherwise
 265 * cause, and hence minimises the CPU overhead of such behaviour.
 266 */
 267static void
 268xlog_cil_alloc_shadow_bufs(
 269	struct xlog		*log,
 270	struct xfs_trans	*tp)
 271{
 272	struct xfs_log_item	*lip;
 273
 274	list_for_each_entry(lip, &tp->t_items, li_trans) {
 
 275		struct xfs_log_vec *lv;
 276		int	niovecs = 0;
 277		int	nbytes = 0;
 278		int	buf_size;
 279		bool	ordered = false;
 280
 281		/* Skip items which aren't dirty in this transaction. */
 282		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 283			continue;
 284
 285		/* get number of vecs and size of data to be stored */
 286		lip->li_ops->iop_size(lip, &niovecs, &nbytes);
 287
 288		/*
 289		 * Ordered items need to be tracked but we do not wish to write
 290		 * them. We need a logvec to track the object, but we do not
 291		 * need an iovec or buffer to be allocated for copying data.
 292		 */
 293		if (niovecs == XFS_LOG_VEC_ORDERED) {
 294			ordered = true;
 295			niovecs = 0;
 296			nbytes = 0;
 297		}
 298
 299		/*
 300		 * We 64-bit align the length of each iovec so that the start of
 301		 * the next one is naturally aligned.  We'll need to account for
 302		 * that slack space here.
 303		 *
 304		 * We also add the xlog_op_header to each region when
 305		 * formatting, but that's not accounted to the size of the item
 306		 * at this point. Hence we'll need an addition number of bytes
 307		 * for each vector to hold an opheader.
 308		 *
 309		 * Then round nbytes up to 64-bit alignment so that the initial
 310		 * buffer alignment is easy to calculate and verify.
 311		 */
 312		nbytes += niovecs *
 313			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
 314		nbytes = round_up(nbytes, sizeof(uint64_t));
 315
 316		/*
 317		 * The data buffer needs to start 64-bit aligned, so round up
 318		 * that space to ensure we can align it appropriately and not
 319		 * overrun the buffer.
 320		 */
 321		buf_size = nbytes + xlog_cil_iovec_space(niovecs);
 322
 323		/*
 324		 * if we have no shadow buffer, or it is too small, we need to
 325		 * reallocate it.
 326		 */
 327		if (!lip->li_lv_shadow ||
 328		    buf_size > lip->li_lv_shadow->lv_size) {
 329			/*
 330			 * We free and allocate here as a realloc would copy
 331			 * unnecessary data. We don't use kvzalloc() for the
 332			 * same reason - we don't need to zero the data area in
 333			 * the buffer, only the log vector header and the iovec
 334			 * storage.
 335			 */
 336			kvfree(lip->li_lv_shadow);
 337			lv = xlog_kvmalloc(buf_size);
 338
 339			memset(lv, 0, xlog_cil_iovec_space(niovecs));
 340
 341			INIT_LIST_HEAD(&lv->lv_list);
 342			lv->lv_item = lip;
 343			lv->lv_size = buf_size;
 344			if (ordered)
 345				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 346			else
 347				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
 348			lip->li_lv_shadow = lv;
 349		} else {
 350			/* same or smaller, optimise common overwrite case */
 351			lv = lip->li_lv_shadow;
 352			if (ordered)
 353				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 354			else
 355				lv->lv_buf_len = 0;
 356			lv->lv_bytes = 0;
 357		}
 358
 359		/* Ensure the lv is set up according to ->iop_size */
 360		lv->lv_niovecs = niovecs;
 361
 362		/* The allocated data region lies beyond the iovec region */
 363		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
 364	}
 365
 366}
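/*
 * Editor's illustration of the resulting shadow buffer layout - one single
 * allocation in which xlog_cil_iovec_space() covers the first two regions:
 *
 *	+--------------------+------------------------+---------------------+
 *	| struct xfs_log_vec | niovecs iovec structs  | 64-bit aligned data |
 *	| (the lv header)    | (lv_iovecp = &lv[1])   | (lv_buf)            |
 *	+--------------------+------------------------+---------------------+
 */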
 367
 368/*
 369 * Prepare the log item for insertion into the CIL. Calculate the difference in
 370 * log space it will consume, and if it is a new item pin it as well.
 371 */
 372STATIC void
 373xfs_cil_prepare_item(
 374	struct xlog		*log,
 375	struct xfs_log_vec	*lv,
 376	struct xfs_log_vec	*old_lv,
 377	int			*diff_len)
 378{
 379	/* Account for the new LV being passed in */
 380	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
 381		*diff_len += lv->lv_bytes;
 382
 383	/*
 384	 * If there is no old LV, this is the first time we've seen the item in
 385	 * this CIL context and so we need to pin it. If we are replacing the
 386	 * old_lv, then remove the space it accounts for and make it the shadow
 387	 * buffer for later freeing. In both cases we are now switching to the
 388	 * shadow buffer, so update the pointer to it appropriately.
 389	 */
 390	if (!old_lv) {
 391		if (lv->lv_item->li_ops->iop_pin)
 392			lv->lv_item->li_ops->iop_pin(lv->lv_item);
 393		lv->lv_item->li_lv_shadow = NULL;
 394	} else if (old_lv != lv) {
 395		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
 396
 397		*diff_len -= old_lv->lv_bytes;
 398		lv->lv_item->li_lv_shadow = old_lv;
 399	}
 400
 401	/* attach new log vector to log item */
 402	lv->lv_item->li_lv = lv;
 403
 404	/*
 405	 * If this is the first time the item is being committed to the
 406	 * CIL, store the sequence number on the log item so we can
 407	 * tell in future commits whether this is the first checkpoint
 408	 * the item is being committed into.
 409	 */
 410	if (!lv->lv_item->li_seq)
 411		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
 412}
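/*
 * Accounting example (editor's illustration): the first commit of an item
 * with lv_bytes = 128 has no old_lv, so *diff_len grows by 128 and the item
 * is pinned. Relogging it later with a 160 byte lv changes *diff_len by
 * +160 - 128 = +32, i.e. only the growth in the item's footprint is charged
 * to the CIL.
 */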
 413
 414/*
 415 * Format log items into flat buffers
 416 *
 417 * For delayed logging, we need to hold a formatted buffer containing all the
 418 * changes on the log item. This enables us to relog the item in memory and
 419 * write it out asynchronously without needing to relock the object that was
 420 * modified at the time it gets written into the iclog.
 421 *
 422 * This function takes the prepared log vectors attached to each log item, and
 423 * formats the changes into the log vector buffer. The buffer it uses is
 424 * dependent on the current state of the vector in the CIL - the shadow lv is
 425 * guaranteed to be large enough for the current modification, but we will only
 426 * use that if we can't reuse the existing lv. If we can't reuse the existing
 427 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 428 * done lazily either by the next modification or the freeing of the log item.
 429 *
 430 * We don't set up region headers during this process; we simply copy the
 431 * regions into the flat buffer. We can do this because we still have to do a
 432 * formatting step to write the regions into the iclog buffer.  Writing the
 433 * ophdrs during the iclog write means that we can support splitting large
 434 * regions across iclog boundaries without needing a change in the format of the
 435 * item/region encapsulation.
 436 *
 437 * Hence what we need to do now is rewrite the vector array to point
 438 * to the copied region inside the buffer we just allocated. This allows us to
 439 * format the regions into the iclog as though they are being formatted
 440 * directly out of the objects themselves.
 441 */
 442static void
 443xlog_cil_insert_format_items(
 444	struct xlog		*log,
 445	struct xfs_trans	*tp,
 446	int			*diff_len)
 447{
 448	struct xfs_log_item	*lip;
 449
 450	/* Bail out if we didn't find a log item.  */
 451	if (list_empty(&tp->t_items)) {
 452		ASSERT(0);
 453		return;
 454	}
 455
 456	list_for_each_entry(lip, &tp->t_items, li_trans) {
 457		struct xfs_log_vec *lv;
 458		struct xfs_log_vec *old_lv = NULL;
 459		struct xfs_log_vec *shadow;
 460		bool	ordered = false;
 461
 462		/* Skip items which aren't dirty in this transaction. */
 463		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 464			continue;
 465
 466		/*
 467		 * The formatting size information is already attached to
 468		 * the shadow lv on the log item.
 469		 */
 470		shadow = lip->li_lv_shadow;
 471		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
 472			ordered = true;
 473
 474		/* Skip items that do not have any vectors for writing */
 475		if (!shadow->lv_niovecs && !ordered)
 476			continue;
 477
 478		/* compare to existing item size */
 479		old_lv = lip->li_lv;
 480		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
 481			/* same or smaller, optimise common overwrite case */
 482			lv = lip->li_lv;
 483
 484			if (ordered)
 485				goto insert;
 486
 487			/*
 488			 * set the item up as though it is a new insertion so
 489			 * that the space reservation accounting is correct.
 490			 */
 491			*diff_len -= lv->lv_bytes;
 492
 493			/* Ensure the lv is set up according to ->iop_size */
 494			lv->lv_niovecs = shadow->lv_niovecs;
 495
 496			/* reset the lv buffer information for new formatting */
 497			lv->lv_buf_len = 0;
 498			lv->lv_bytes = 0;
 499			lv->lv_buf = (char *)lv +
 500					xlog_cil_iovec_space(lv->lv_niovecs);
 501		} else {
 502			/* switch to shadow buffer! */
 503			lv = shadow;
 504			lv->lv_item = lip;
 505			if (ordered) {
 506				/* track as an ordered logvec */
 507				ASSERT(lip->li_lv == NULL);
 508				goto insert;
 509			}
 510		}
 511
 512		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
 513		lip->li_ops->iop_format(lip, lv);
 514insert:
 515		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
 516	}
 517}
 518
 519/*
 520 * The use of lockless waitqueue_active() requires that the caller has
 521 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 522 * can be done by either holding the push lock or the context lock.
 523 */
 524static inline bool
 525xlog_cil_over_hard_limit(
 526	struct xlog	*log,
 527	int32_t		space_used)
 528{
 529	if (waitqueue_active(&log->l_cilp->xc_push_wait))
 530		return true;
 531	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
 532		return true;
 533	return false;
 534}
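/*
 * Editor's note: the waitqueue_active() check makes the hard throttle
 * sticky - once any committer has blocked on xc_push_wait, every subsequent
 * commit is treated as over the hard limit until the push work wakes the
 * waiters, even if space_used has dipped back below the blocking limit
 * (which the log headers define as a multiple of the soft
 * XLOG_CIL_SPACE_LIMIT()).
 */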
 535
 536/*
 537 * Insert the log items into the CIL and calculate the difference in space
 538 * consumed by the item. Add the space to the checkpoint ticket and calculate
 539 * if the change requires additional log metadata. If it does, take that space
 540 * as well. Remove the amount of space we added to the checkpoint ticket from
 541 * the current transaction ticket so that the accounting works out correctly.
 542 */
 543static void
 544xlog_cil_insert_items(
 545	struct xlog		*log,
 546	struct xfs_trans	*tp,
 547	uint32_t		released_space)
 548{
 549	struct xfs_cil		*cil = log->l_cilp;
 550	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
 551	struct xfs_log_item	*lip;
 552	int			len = 0;
 553	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
 554	int			space_used;
 555	int			order;
 556	unsigned int		cpu_nr;
 557	struct xlog_cil_pcp	*cilpcp;
 558
 559	ASSERT(tp);
 560
 561	/*
 562	 * We can do this safely because the context can't checkpoint until we
 563	 * are done so it doesn't matter exactly how we update the CIL.
 564	 */
 565	xlog_cil_insert_format_items(log, tp, &len);
 566
 567	/*
 568	 * Subtract the space released by intent cancelation from the space we
 569	 * consumed so that we remove it from the CIL space and add it back to
 570	 * the current transaction reservation context.
 571	 */
 572	len -= released_space;
 573
 574	/*
 575	 * Grab the per-cpu pointer for the CIL before we start any accounting.
 576	 * That ensures that we are running with pre-emption disabled and so we
 577	 * can't be scheduled away between split sample/update operations that
 578	 * are done without outside locking to serialise them.
 579	 */
 580	cpu_nr = get_cpu();
 581	cilpcp = this_cpu_ptr(cil->xc_pcp);
 582
 583	/* Tell the future push that there was work added by this CPU. */
 584	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
 585		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
 586
 587	/*
 588	 * We need to take the CIL checkpoint unit reservation on the first
 589	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
 590	 * unnecessarily do an atomic op in the fast path here. We can clear the
 591	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
 592	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
 593	 */
 594	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
 595	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
 596		ctx_res = ctx->ticket->t_unit_res;
 597
 598	/*
 599	 * Check if we need to steal iclog headers. atomic_read() is not a
 600	 * locked atomic operation, so we can check the value before we do any
 601	 * real atomic ops in the fast path. If we've already taken the CIL unit
 602	 * reservation from this commit, we've already got one iclog header
 603	 * space reserved so we have to account for that otherwise we risk
 604	 * overrunning the reservation on this ticket.
 605	 *
 606	 * If the CIL is already at the hard limit, we might need more header
 607 * space than originally reserved. So steal more header space from every
 608	 * commit that occurs once we are over the hard limit to ensure the CIL
 609	 * push won't run out of reservation space.
 610	 *
 611	 * This can steal more than we need, but that's OK.
 612	 *
 613	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
 614	 * calling xlog_cil_over_hard_limit() in this context.
 615	 */
 616	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
 617	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
 618	    xlog_cil_over_hard_limit(log, space_used)) {
 619		split_res = log->l_iclog_hsize +
 620					sizeof(struct xlog_op_header);
 621		if (ctx_res)
 622			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
 623		else
 624			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
 625		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
 626	}
 627	cilpcp->space_reserved += ctx_res;
 628
 629	/*
 630	 * Accurately account when over the soft limit, otherwise fold the
 631	 * percpu count into the global count if over the per-cpu threshold.
 632	 */
 633	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
 634		atomic_add(len, &ctx->space_used);
 635	} else if (cilpcp->space_used + len >
 636			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
 637		space_used = atomic_add_return(cilpcp->space_used + len,
 638						&ctx->space_used);
 639		cilpcp->space_used = 0;
 640
 641		/*
 642		 * If we just transitioned over the soft limit, we need to
 643		 * transition to the global atomic counter.
 644		 */
 645		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
 646			xlog_cil_insert_pcp_aggregate(cil, ctx);
 647	} else {
 648		cilpcp->space_used += len;
 649	}
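	/*
	 * Worked example (editor's illustration): with 8 online CPUs and a
	 * soft limit of 32MB, each CPU batches updates in cilpcp->space_used
	 * until the local count would exceed 32MB / 8 = 4MB, then folds it
	 * into ctx->space_used and resets it to zero. The global counter can
	 * therefore lag by at most roughly one soft limit in total across
	 * all CPUs, which is why the freshly folded sum is rechecked against
	 * XLOG_CIL_SPACE_LIMIT() above.
	 */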
 650	/* attach the transaction to the CIL if it has any busy extents */
 651	if (!list_empty(&tp->t_busy))
 652		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
 653
 654	/*
 655	 * Now update the order of everything modified in the transaction
 656	 * and insert items into the CIL if they aren't already there.
 657	 * We do this here so we only need to take the CIL lock once during
 658	 * the transaction commit.
 659	 */
 660	order = atomic_inc_return(&ctx->order_id);
 661	list_for_each_entry(lip, &tp->t_items, li_trans) {
 662		/* Skip items which aren't dirty in this transaction. */
 663		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 664			continue;
 665
 666		lip->li_order_id = order;
 667		if (!list_empty(&lip->li_cil))
 668			continue;
 669		list_add_tail(&lip->li_cil, &cilpcp->log_items);
 670	}
 671	put_cpu();
 672
 673	/*
 674	 * If we've overrun the reservation, dump the tx details before we move
 675	 * the log items. Shutdown is imminent...
 676	 */
 677	tp->t_ticket->t_curr_res -= ctx_res + len;
 678	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
 679		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
 680		xfs_warn(log->l_mp,
 681			 "  log items: %d bytes (iov hdrs: %d bytes)",
 682			 len, iovhdr_res);
 683		xfs_warn(log->l_mp, "  split region headers: %d bytes",
 684			 split_res);
 685		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
 686		xlog_print_trans(tp);
 687		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 688	}
 689}
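/*
 * Reservation example (editor's illustration, sizes assumed): with
 * l_iclog_hsize = 512 and t_iclog_hdrs = 4 on the committing ticket,
 * split_res is 512 bytes plus one opheader. If this commit also took the
 * CIL unit reservation (ctx_res != 0), one header is already accounted
 * for, so only 3 * split_res more is stolen; otherwise the full
 * 4 * split_res is. Either way the stolen bytes come straight out of
 * tp->t_ticket->t_curr_res, so each transaction pays for the checkpoint
 * overhead it may cause.
 */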
 690
 691static inline void
 692xlog_cil_ail_insert_batch(
 693	struct xfs_ail		*ailp,
 694	struct xfs_ail_cursor	*cur,
 695	struct xfs_log_item	**log_items,
 696	int			nr_items,
 697	xfs_lsn_t		commit_lsn)
 698{
 699	int	i;
 700
 701	spin_lock(&ailp->ail_lock);
 702	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 703	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 704
 705	for (i = 0; i < nr_items; i++) {
 706		struct xfs_log_item *lip = log_items[i];
 707
 708		if (lip->li_ops->iop_unpin)
 709			lip->li_ops->iop_unpin(lip, 0);
 710	}
 711}
 712
 713/*
 714 * Take the checkpoint's log vector chain of items and insert the attached log
 715 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
 716 * traffic.
 717 *
 718 * The AIL tracks log items via the start record LSN of the checkpoint,
 719 * not the commit record LSN. This is because we can pipeline multiple
 720 * checkpoints, and so the start record of checkpoint N+1 can be
 721 * written before the commit record of checkpoint N. i.e:
 722 *
 723 *   start N			commit N
 724 *	+-------------+------------+----------------+
 725 *		  start N+1			commit N+1
 726 *
 727 * The tail of the log cannot be moved to the LSN of commit N when all
 728 * the items of that checkpoint are written back, because then the
 729 * start record for N+1 is no longer in the active portion of the log
 730 * and recovery will fail/corrupt the filesystem.
 731 *
 732 * Hence when all the log items in checkpoint N are written back, the
 733 * tail of the log must now only move as far forwards as the start LSN
 734 * of checkpoint N+1.
 735 *
 736 * If we are called with the aborted flag set, it is because a log write during
 737 * a CIL checkpoint commit has failed. In this case, all the items in the
 738 * checkpoint have already gone through iop_committed and iop_committing, which
 739 * means that checkpoint commit abort handling is treated exactly the same as an
 740 * iclog write error even though we haven't started any IO yet. Hence in this
 741 * case all we need to do is iop_committed processing, followed by an
 742 * iop_unpin(aborted) call.
 743 *
 744 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 745 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 746 * find the insertion point on every xfs_log_item_batch_insert() call. This
 747 * saves a lot of needless list walking and is a net win, even though it
 748 * slightly increases the amount of AIL lock traffic to set it up and tear it
 749 * down.
 750 */
 751static void
 752xlog_cil_ail_insert(
 753	struct xfs_cil_ctx	*ctx,
 754	bool			aborted)
 755{
 756#define LOG_ITEM_BATCH_SIZE	32
 757	struct xfs_ail		*ailp = ctx->cil->xc_log->l_ailp;
 758	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 759	struct xfs_log_vec	*lv;
 760	struct xfs_ail_cursor	cur;
 761	xfs_lsn_t		old_head;
 762	int			i = 0;
 763
 764	/*
 765	 * Update the AIL head LSN with the commit record LSN of this
 766	 * checkpoint. As iclogs are always completed in order, this should
 767	 * always be the same (as iclogs can contain multiple commit records) or
 768	 * higher LSN than the current head. We do this before insertion of the
 769	 * items so that log space checks during insertion will reflect the
 770	 * space that this checkpoint has already consumed.  We call
 771	 * xfs_ail_update_finish() so that tail space and space-based wakeups
 772	 * will be recalculated appropriately.
 773	 */
 774	ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
 775			aborted);
 776	spin_lock(&ailp->ail_lock);
 777	xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
 778	old_head = ailp->ail_head_lsn;
 779	ailp->ail_head_lsn = ctx->commit_lsn;
 780	/* xfs_ail_update_finish() drops the ail_lock */
 781	xfs_ail_update_finish(ailp, NULLCOMMITLSN);
 782
 783	/*
 784	 * We move the AIL head forwards to account for the space used in the
 785	 * log before we remove that space from the grant heads. This prevents a
 786	 * transient condition where reservation space appears to become
 787	 * available on return, only for it to disappear again immediately as
 788	 * the AIL head update accounts in the log tail space.
 789	 */
 790	smp_wmb();	/* paired with smp_rmb in xlog_grant_space_left */
 791	xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);
 792
 793	/* unpin all the log items */
 794	list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
 795		struct xfs_log_item	*lip = lv->lv_item;
 796		xfs_lsn_t		item_lsn;
 797
 798		if (aborted)
 799			set_bit(XFS_LI_ABORTED, &lip->li_flags);
 800
 801		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
 802			lip->li_ops->iop_release(lip);
 803			continue;
 804		}
 805
 806		if (lip->li_ops->iop_committed)
 807			item_lsn = lip->li_ops->iop_committed(lip,
 808					ctx->start_lsn);
 809		else
 810			item_lsn = ctx->start_lsn;
 811
 812		/* item_lsn of -1 means the item needs no further processing */
 813		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 814			continue;
 815
 816		/*
 817		 * if we are aborting the operation, no point in inserting the
 818		 * object into the AIL as we are in a shutdown situation.
 819		 */
 820		if (aborted) {
 821			ASSERT(xlog_is_shutdown(ailp->ail_log));
 822			if (lip->li_ops->iop_unpin)
 823				lip->li_ops->iop_unpin(lip, 1);
 824			continue;
 825		}
 826
 827		if (item_lsn != ctx->start_lsn) {
 828
 829			/*
 830			 * Not a bulk update option due to unusual item_lsn.
 831			 * Push into AIL immediately, rechecking the lsn once
 832			 * we have the ail lock. Then unpin the item. This does
 833			 * not affect the AIL cursor the bulk insert path is
 834			 * using.
 835			 */
 836			spin_lock(&ailp->ail_lock);
 837			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 838				xfs_trans_ail_update(ailp, lip, item_lsn);
 839			else
 840				spin_unlock(&ailp->ail_lock);
 841			if (lip->li_ops->iop_unpin)
 842				lip->li_ops->iop_unpin(lip, 0);
 843			continue;
 844		}
 845
 846		/* Item is a candidate for bulk AIL insert.  */
 847		log_items[i++] = lv->lv_item;
 848		if (i >= LOG_ITEM_BATCH_SIZE) {
 849			xlog_cil_ail_insert_batch(ailp, &cur, log_items,
 850					LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
 851			i = 0;
 852		}
 853	}
 854
 855	/* make sure we insert the remainder! */
 856	if (i)
 857		xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
 858				ctx->start_lsn);
 859
 860	spin_lock(&ailp->ail_lock);
 861	xfs_trans_ail_cursor_done(&cur);
 862	spin_unlock(&ailp->ail_lock);
 863}
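/*
 * Concrete example of the pipelining rule above (editor's illustration):
 * if checkpoint N has start LSN 100 and commit LSN 120 while checkpoint
 * N+1 has start LSN 110, then once all of N's items are written back the
 * log tail may only move forwards to 110, not 120, otherwise the start
 * record of N+1 would fall outside the active log and recovery would fail.
 */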
 864
 865static void
 866xlog_cil_free_logvec(
 867	struct list_head	*lv_chain)
 868{
 869	struct xfs_log_vec	*lv;
 870
 871	while (!list_empty(lv_chain)) {
 872		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
 873		list_del_init(&lv->lv_list);
 874		kvfree(lv);
 875	}
 876}
 877
 878/*
 879 * Mark all items committed and clear busy extents. We free the log vector
 880 * chains in a separate pass so that we unpin the log items as quickly as
 881 * possible.
 882 */
 883static void
 884xlog_cil_committed(
 885	struct xfs_cil_ctx	*ctx)
 886{
 887	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
 888	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);
 889
 890	/*
 891	 * If the I/O failed, we're aborting the commit and already shutdown.
 892	 * Wake any commit waiters before aborting the log items so we don't
 893	 * block async log pushers on callbacks. Async log pushers explicitly do
 894	 * not wait on log force completion because they may be holding locks
 895	 * required to unpin items.
 896	 */
 897	if (abort) {
 898		spin_lock(&ctx->cil->xc_push_lock);
 899		wake_up_all(&ctx->cil->xc_start_wait);
 900		wake_up_all(&ctx->cil->xc_commit_wait);
 901		spin_unlock(&ctx->cil->xc_push_lock);
 902	}
 903
 904	xlog_cil_ail_insert(ctx, abort);
 905
 906	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
 907	xfs_extent_busy_clear(&ctx->busy_extents.extent_list,
 908			      xfs_has_discard(mp) && !abort);
 909
 910	spin_lock(&ctx->cil->xc_push_lock);
 911	list_del(&ctx->committing);
 912	spin_unlock(&ctx->cil->xc_push_lock);
 913
 914	xlog_cil_free_logvec(&ctx->lv_chain);
 915
 916	if (!list_empty(&ctx->busy_extents.extent_list)) {
 917		ctx->busy_extents.owner = ctx;
 918		xfs_discard_extents(mp, &ctx->busy_extents);
 919		return;
 920	}
 921
 922	kfree(ctx);
 923}
 924
 925void
 926xlog_cil_process_committed(
 927	struct list_head	*list)
 928{
 929	struct xfs_cil_ctx	*ctx;
 930
 931	while ((ctx = list_first_entry_or_null(list,
 932			struct xfs_cil_ctx, iclog_entry))) {
 933		list_del(&ctx->iclog_entry);
 934		xlog_cil_committed(ctx);
 935	}
 936}
 937
 938/*
 939 * Record the LSN of the iclog we were just granted space to start writing into.
 940 * If the context doesn't have a start_lsn recorded, then this iclog will
 941 * contain the start record for the checkpoint. Otherwise this write contains
 942 * the commit record for the checkpoint.
 943 */
 944void
 945xlog_cil_set_ctx_write_state(
 946	struct xfs_cil_ctx	*ctx,
 947	struct xlog_in_core	*iclog)
 948{
 949	struct xfs_cil		*cil = ctx->cil;
 950	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 951
 952	ASSERT(!ctx->commit_lsn);
 953	if (!ctx->start_lsn) {
 954		spin_lock(&cil->xc_push_lock);
 955		/*
 956		 * The LSN we need to pass to the log items on transaction
 957		 * commit is the LSN reported by the first log vector write, not
 958		 * the commit lsn. If we use the commit record lsn then we can
 959		 * move the grant write head beyond the tail LSN and overwrite
 960		 * it.
 961		 */
 962		ctx->start_lsn = lsn;
 963		wake_up_all(&cil->xc_start_wait);
 964		spin_unlock(&cil->xc_push_lock);
 965
 966		/*
 967		 * Make sure the metadata we are about to overwrite in the log
 968		 * has been flushed to stable storage before this iclog is
 969		 * issued.
 970		 */
 971		spin_lock(&cil->xc_log->l_icloglock);
 972		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
 973		spin_unlock(&cil->xc_log->l_icloglock);
 974		return;
 975	}
 976
 977	/*
 978	 * Take a reference to the iclog for the context so that we still hold
 979	 * it when xlog_write is done and has released it. This means the
 980	 * context controls when the iclog is released for IO.
 981	 */
 982	atomic_inc(&iclog->ic_refcnt);
 983
 984	/*
 985	 * xlog_state_get_iclog_space() guarantees there is enough space in the
 986	 * iclog for an entire commit record, so we can attach the context
 987	 * callbacks now.  This needs to be done before we make the commit_lsn
 988	 * visible to waiters so that checkpoints with commit records in the
 989	 * same iclog order their IO completion callbacks in the same order that
 990	 * the commit records appear in the iclog.
 991	 */
 992	spin_lock(&cil->xc_log->l_icloglock);
 993	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
 994	spin_unlock(&cil->xc_log->l_icloglock);
 995
 996	/*
 997	 * Now we can record the commit LSN and wake anyone waiting for this
 998	 * sequence to have the ordered commit record assigned to a physical
 999	 * location in the log.
1000	 */
1001	spin_lock(&cil->xc_push_lock);
1002	ctx->commit_iclog = iclog;
1003	ctx->commit_lsn = lsn;
1004	wake_up_all(&cil->xc_commit_wait);
1005	spin_unlock(&cil->xc_push_lock);
1006}
1007
1008
1009/*
1010 * Ensure that the order of log writes follows checkpoint sequence order. This
1011 * relies on the context LSN being zero until the log write has guaranteed the
1012 * LSN that the log write will start at via xlog_state_get_iclog_space().
1013 */
1014enum _record_type {
1015	_START_RECORD,
1016	_COMMIT_RECORD,
1017};
1018
1019static int
1020xlog_cil_order_write(
1021	struct xfs_cil		*cil,
1022	xfs_csn_t		sequence,
1023	enum _record_type	record)
1024{
1025	struct xfs_cil_ctx	*ctx;
1026
1027restart:
1028	spin_lock(&cil->xc_push_lock);
1029	list_for_each_entry(ctx, &cil->xc_committing, committing) {
1030		/*
1031		 * Avoid getting stuck in this loop because we were woken by the
1032		 * shutdown, but then went back to sleep once already in the
1033		 * shutdown state.
1034		 */
1035		if (xlog_is_shutdown(cil->xc_log)) {
1036			spin_unlock(&cil->xc_push_lock);
1037			return -EIO;
1038		}
1039
1040		/*
1041		 * Higher sequences will wait for this one so skip them.
1042		 * Don't wait for our own sequence, either.
1043		 */
1044		if (ctx->sequence >= sequence)
1045			continue;
1046
1047		/* Wait until the LSN for the record has been recorded. */
1048		switch (record) {
1049		case _START_RECORD:
1050			if (!ctx->start_lsn) {
1051				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
1052				goto restart;
1053			}
1054			break;
1055		case _COMMIT_RECORD:
1056			if (!ctx->commit_lsn) {
1057				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1058				goto restart;
1059			}
1060			break;
1061		}
1062	}
1063	spin_unlock(&cil->xc_push_lock);
1064	return 0;
1065}
1066
1067/*
1068 * Write out the log vector chain now attached to the CIL context. This will
1069 * write a start record that needs to be strictly ordered in ascending CIL
1070 * sequence order so that log recovery will always use in-order start LSNs when
1071 * replaying checkpoints.
1072 */
1073static int
1074xlog_cil_write_chain(
1075	struct xfs_cil_ctx	*ctx,
1076	uint32_t		chain_len)
1077{
1078	struct xlog		*log = ctx->cil->xc_log;
1079	int			error;
1080
1081	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
1082	if (error)
1083		return error;
1084	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
1085}
1086
1087/*
1088 * Write out the commit record of a checkpoint transaction to close off a
1089 * running log write. These commit records are strictly ordered in ascending CIL
1090 * sequence order so that log recovery will always replay the checkpoints in the
1091 * correct order.
1092 */
1093static int
1094xlog_cil_write_commit_record(
1095	struct xfs_cil_ctx	*ctx)
1096{
1097	struct xlog		*log = ctx->cil->xc_log;
1098	struct xlog_op_header	ophdr = {
1099		.oh_clientid = XFS_TRANSACTION,
1100		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
1101		.oh_flags = XLOG_COMMIT_TRANS,
1102	};
1103	struct xfs_log_iovec	reg = {
1104		.i_addr = &ophdr,
1105		.i_len = sizeof(struct xlog_op_header),
1106		.i_type = XLOG_REG_TYPE_COMMIT,
1107	};
1108	struct xfs_log_vec	vec = {
1109		.lv_niovecs = 1,
1110		.lv_iovecp = &reg,
1111	};
1112	int			error;
1113	LIST_HEAD(lv_chain);
1114	list_add(&vec.lv_list, &lv_chain);
1115
1116	if (xlog_is_shutdown(log))
1117		return -EIO;
1118
1119	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
1120	if (error)
1121		return error;
1122
1123	/* account for space used by record data */
1124	ctx->ticket->t_curr_res -= reg.i_len;
1125	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
1126	if (error)
1127		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1128	return error;
1129}
1130
1131struct xlog_cil_trans_hdr {
1132	struct xlog_op_header	oph[2];
1133	struct xfs_trans_header	thdr;
1134	struct xfs_log_iovec	lhdr[2];
1135};
1136
1137/*
1138 * Build a checkpoint transaction header to begin the journal transaction.  We
1139 * need to account for the space used by the transaction header here as it is
1140 * not accounted for in xlog_write().
1141 *
1142 * This is the only place we write a transaction header, so we also build the
1143 * log opheaders that indicate the start of a log transaction and wrap the
1144 * transaction header. We keep the start record in its own log vector rather
1145 * than compacting them into a single region as this ends up making the logic
1146 * in xlog_write() for handling empty opheaders for start, commit and unmount
1147 * records much simpler.
1148 */
1149static void
1150xlog_cil_build_trans_hdr(
1151	struct xfs_cil_ctx	*ctx,
1152	struct xlog_cil_trans_hdr *hdr,
1153	struct xfs_log_vec	*lvhdr,
1154	int			num_iovecs)
1155{
1156	struct xlog_ticket	*tic = ctx->ticket;
1157	__be32			tid = cpu_to_be32(tic->t_tid);
1158
1159	memset(hdr, 0, sizeof(*hdr));
1160
1161	/* Log start record */
1162	hdr->oph[0].oh_tid = tid;
1163	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
1164	hdr->oph[0].oh_flags = XLOG_START_TRANS;
1165
1166	/* log iovec region pointer */
1167	hdr->lhdr[0].i_addr = &hdr->oph[0];
1168	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
1169	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;
1170
1171	/* log opheader */
1172	hdr->oph[1].oh_tid = tid;
1173	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
1174	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));
1175
1176	/* transaction header in host byte order format */
1177	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
1178	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
1179	hdr->thdr.th_tid = tic->t_tid;
1180	hdr->thdr.th_num_items = num_iovecs;
1181
1182	/* log iovec region pointer */
1183	hdr->lhdr[1].i_addr = &hdr->oph[1];
1184	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
1185				sizeof(struct xfs_trans_header);
1186	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;
1187
1188	lvhdr->lv_niovecs = 2;
1189	lvhdr->lv_iovecp = &hdr->lhdr[0];
1190	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
1191
1192	tic->t_curr_res -= lvhdr->lv_bytes;
1193}
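/*
 * Resulting layout (editor's illustration): the lvhdr carries two iovecs
 * that xlog_write() emits at the very start of the checkpoint:
 *
 *	lhdr[0]: oph[0]		- XLOG_START_TRANS opheader (start record)
 *	lhdr[1]: oph[1] + thdr	- opheader wrapping the transaction header
 *
 * Note that oph[1].oh_len covers only the xfs_trans_header; the opheader
 * itself is accounted via lhdr[1].i_len and the ticket reservation above.
 */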
1194
1195/*
1196 * CIL item reordering compare function. We want to order in ascending ID order,
1197 * but we want to leave items with the same ID in the order they were added to
1198 * the list. This is important for operations like reflink where we log four
1199 * order-dependent intents in a single transaction when we overwrite an existing
1200 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
1201 * CUI (inc), BUI(remap)...
1202 */
1203static int
1204xlog_cil_order_cmp(
1205	void			*priv,
1206	const struct list_head	*a,
1207	const struct list_head	*b)
1208{
1209	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
1210	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);
1211
1212	return l1->lv_order_id > l2->lv_order_id;
1213}
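/*
 * Editor's note: list_sort() only needs to know whether @a should sort
 * after @b, so returning the boolean comparison is sufficient. Because
 * list_sort() is a stable merge sort, vectors with equal order ids -
 * i.e. items logged by the same transaction - keep their insertion order,
 * preserving intent ordering such as the reflink example above.
 */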
1214
1215/*
1216 * Pull all the log vectors off the items in the CIL, and remove the items from
1217 * the CIL. We don't need the CIL lock here because it's only needed on the
1218 * transaction commit side which is currently locked out by the flush lock.
1219 *
1220 * If a log item is marked with a whiteout, we do not need to write it to the
1221 * journal and so we just move it to the whiteout list for the caller to
1222 * dispose of appropriately.
1223 */
1224static void
1225xlog_cil_build_lv_chain(
1226	struct xfs_cil_ctx	*ctx,
1227	struct list_head	*whiteouts,
1228	uint32_t		*num_iovecs,
1229	uint32_t		*num_bytes)
1230{
1231	while (!list_empty(&ctx->log_items)) {
1232		struct xfs_log_item	*item;
1233		struct xfs_log_vec	*lv;
1234
1235		item = list_first_entry(&ctx->log_items,
1236					struct xfs_log_item, li_cil);
1237
1238		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
1239			list_move(&item->li_cil, whiteouts);
1240			trace_xfs_cil_whiteout_skip(item);
1241			continue;
1242		}
1243
1244		lv = item->li_lv;
1245		lv->lv_order_id = item->li_order_id;
1246
1247		/* we don't write ordered log vectors */
1248		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
1249			*num_bytes += lv->lv_bytes;
1250		*num_iovecs += lv->lv_niovecs;
1251		list_add_tail(&lv->lv_list, &ctx->lv_chain);
1252
1253		list_del_init(&item->li_cil);
1254		item->li_order_id = 0;
1255		item->li_lv = NULL;
1256	}
1257}
1258
1259static void
1260xlog_cil_cleanup_whiteouts(
1261	struct list_head	*whiteouts)
1262{
1263	while (!list_empty(whiteouts)) {
1264		struct xfs_log_item *item = list_first_entry(whiteouts,
1265						struct xfs_log_item, li_cil);
1266		list_del_init(&item->li_cil);
1267		trace_xfs_cil_whiteout_unpin(item);
1268		item->li_ops->iop_unpin(item, 1);
1269	}
1270}
1271
1272/*
1273 * Push the Committed Item List to the log.
1274 *
1275 * If the current sequence is the same as xc_push_seq we need to do a flush. If
1276 * xc_push_seq is less than the current sequence, then it has already been
1277 * flushed and we don't need to do anything - the caller will wait for it to
1278 * complete if necessary.
1279 *
1280 * xc_push_seq is checked unlocked against the sequence number for a match.
1281 * Hence we can allow log forces to run racily and not issue pushes for the
1282 * same sequence twice.  If we get a race between multiple pushes for the same
1283 * sequence they will block on the first one and then abort, hence avoiding
1284 * needless pushes.
1285 *
1286 * This runs from a workqueue so it does not inherit any specific memory
1287 * allocation context. However, we do not want to block on memory reclaim
1288 * recursing back into the filesystem because this push may have been triggered
1289 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
1290 * constraints here.
1291 */
1292static void
1293xlog_cil_push_work(
1294	struct work_struct	*work)
1295{
1296	unsigned int		nofs_flags = memalloc_nofs_save();
1297	struct xfs_cil_ctx	*ctx =
1298		container_of(work, struct xfs_cil_ctx, push_work);
1299	struct xfs_cil		*cil = ctx->cil;
1300	struct xlog		*log = cil->xc_log;
1301	struct xfs_cil_ctx	*new_ctx;
1302	int			num_iovecs = 0;
1303	int			num_bytes = 0;
1304	int			error = 0;
1305	struct xlog_cil_trans_hdr thdr;
1306	struct xfs_log_vec	lvhdr = {};
1307	xfs_csn_t		push_seq;
1308	bool			push_commit_stable;
1309	LIST_HEAD		(whiteouts);
1310	struct xlog_ticket	*ticket;
1311
1312	new_ctx = xlog_cil_ctx_alloc();
1313	new_ctx->ticket = xlog_cil_ticket_alloc(log);
1314
1315	down_write(&cil->xc_ctx_lock);
1316
1317	spin_lock(&cil->xc_push_lock);
1318	push_seq = cil->xc_push_seq;
1319	ASSERT(push_seq <= ctx->sequence);
1320	push_commit_stable = cil->xc_push_commit_stable;
1321	cil->xc_push_commit_stable = false;
1322
1323	/*
1324	 * As we are about to switch to a new, empty CIL context, we no longer
1325	 * need to throttle tasks on CIL space overruns. Wake any waiters that
1326	 * the hard push throttle may have caught so they can start committing
1327	 * to the new context. The cil->xc_push_lock provides the serialisation
1328	 * necessary for safely using the lockless waitqueue_active() check in
1329	 * this context.
1330	 */
1331	if (waitqueue_active(&cil->xc_push_wait))
1332		wake_up_all(&cil->xc_push_wait);
1333
1334	xlog_cil_push_pcp_aggregate(cil, ctx);
1335
1336	/*
1337	 * Check if we've anything to push. If there is nothing, then we don't
1338	 * move on to a new sequence number and so we have to be able to push
1339	 * this sequence again later.
1340	 */
1341	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1342		cil->xc_push_seq = 0;
1343		spin_unlock(&cil->xc_push_lock);
1344		goto out_skip;
1345	}
1346
1347
1348	/* check for a previously pushed sequence */
1349	if (push_seq < ctx->sequence) {
1350		spin_unlock(&cil->xc_push_lock);
1351		goto out_skip;
1352	}
1353
1354	/*
1355	 * We are now going to push this context, so add it to the committing
1356	 * list before we do anything else. This ensures that anyone waiting on
1357	 * this push can easily detect the difference between a "push in
1358	 * progress" and "CIL is empty, nothing to do".
1359	 *
1360	 * IOWs, a wait loop can now check for:
1361	 *	the current sequence not being found on the committing list;
1362	 *	an empty CIL; and
1363	 *	an unchanged sequence number
1364	 * to detect a push that had nothing to do and therefore does not need
1365	 * waiting on. If the CIL is not empty, we get put on the committing
1366	 * list before emptying the CIL and bumping the sequence number. Hence
1367	 * an empty CIL and an unchanged sequence number means we jumped out
1368	 * above after doing nothing.
1369	 *
1370	 * Hence the waiter will either find the commit sequence on the
1371	 * committing list or the sequence number will be unchanged and the CIL
1372	 * still dirty. In that latter case, the push has not yet started, and
1373	 * so the waiter will have to continue trying to check the CIL
1374	 * committing list until it is found. In extreme cases of delay, the
1375	 * sequence may fully commit between the attempts the waiter makes to wait
1376	 * on the commit sequence.
1377	 */
1378	list_add(&ctx->committing, &cil->xc_committing);
1379	spin_unlock(&cil->xc_push_lock);
1380
1381	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
1382
1383	/*
1384	 * Switch the contexts so we can drop the context lock and move out
1385	 * of a shared context. We can't just go straight to the commit record,
1386	 * though - we need to synchronise with previous and future commits so
1387	 * that the commit records are correctly ordered in the log to ensure
1388	 * that we process items during log IO completion in the correct order.
1389	 *
1390	 * For example, if we get an EFI in one checkpoint and the EFD in the
1391	 * next (e.g. due to log forces), we do not want the checkpoint with
1392	 * the EFD to be committed before the checkpoint with the EFI.  Hence
1393	 * we must strictly order the commit records of the checkpoints so
1394	 * that: a) the checkpoint callbacks are attached to the iclogs in the
1395	 * correct order; and b) the checkpoints are replayed in correct order
1396	 * in log recovery.
1397	 *
1398	 * Hence we need to add this context to the committing context list so
1399	 * that higher sequences will wait for us to write out a commit record
1400	 * before they do.
1401	 *
1402	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
1403	 * structure atomically with the addition of this sequence to the
1404	 * committing list. This also ensures that we can do unlocked checks
1405	 * against the current sequence in log forces without risking
1406	 * dereferencing a freed context pointer.
1407	 */
1408	spin_lock(&cil->xc_push_lock);
1409	xlog_cil_ctx_switch(cil, new_ctx);
1410	spin_unlock(&cil->xc_push_lock);
1411	up_write(&cil->xc_ctx_lock);
1412
1413	/*
1414	 * Sort the log vector chain before we add the transaction headers.
1415	 * This ensures we always have the transaction headers at the start
1416	 * of the chain.
1417	 */
1418	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
1419
1420	/*
1421	 * Build a checkpoint transaction header and write it to the log to
1422	 * begin the transaction. We need to account for the space used by the
1423	 * transaction header here as it is not accounted for in xlog_write().
1424	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
1425	 * it gets written into the iclog first.
1426	 */
1427	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
1428	num_bytes += lvhdr.lv_bytes;
1429	list_add(&lvhdr.lv_list, &ctx->lv_chain);
1430
1431	/*
1432	 * Take the lvhdr back off the lv_chain immediately after calling
1433	 * xlog_cil_write_chain() as it should not be passed to log IO
1434	 * completion.
1435	 */
1436	error = xlog_cil_write_chain(ctx, num_bytes);
1437	list_del(&lvhdr.lv_list);
1438	if (error)
1439		goto out_abort_free_ticket;
1440
1441	error = xlog_cil_write_commit_record(ctx);
1442	if (error)
1443		goto out_abort_free_ticket;
1444
1445	/*
1446	 * Grab the ticket from the ctx so we can ungrant it after releasing the
1447	 * commit_iclog. The ctx may be freed by the time we return from
1448	 * releasing the commit_iclog (i.e. checkpoint has been completed and
1449	 * callback run) so we can't reference the ctx after the call to
1450	 * xlog_state_release_iclog().
1451	 */
1452	ticket = ctx->ticket;
1453
1454	/*
1455	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
1456	 * to complete before we submit the commit_iclog. We can't use state
1457	 * checks for this - ACTIVE can be either a past completed iclog or a
1458	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
1459	 * past or future iclog awaiting IO or ordered IO completion to be run.
1460	 * In the latter case, if it's a future iclog and we wait on it, then we
1461	 * will hang because it won't get processed through to ic_force_wait
1462	 * wakeup until this commit_iclog is written to disk.  Hence we use the
1463	 * iclog header lsn and compare it to the commit lsn to determine if we
1464	 * need to wait on iclogs or not.
1465	 */
1466	spin_lock(&log->l_icloglock);
1467	if (ctx->start_lsn != ctx->commit_lsn) {
1468		xfs_lsn_t	plsn;
1469
1470		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
1471		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
1472			/*
1473			 * Waiting on ic_force_wait orders the completion of
1474			 * iclogs older than ic_prev. Hence we only need to wait
1475			 * on the most recent older iclog here.
1476			 */
1477			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
1478			spin_lock(&log->l_icloglock);
1479		}
1480
1481		/*
1482		 * We need to issue a pre-flush so that the ordering for this
1483		 * checkpoint is correctly preserved down to stable storage.
1484		 */
1485		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
1486	}
1487
1488	/*
1489	 * The commit iclog must be written to stable storage to guarantee
1490	 * journal IO vs metadata writeback IO is correctly ordered on stable
1491	 * storage.
1492	 *
1493	 * If the push caller needs the commit to be immediately stable and the
1494	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
1495	 * will be written when released, switch its state to WANT_SYNC right
1496	 * now.
1497	 */
1498	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
1499	if (push_commit_stable &&
1500	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
1501		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1503	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1504
1505	/* Not safe to reference ctx now! */
1506
1507	spin_unlock(&log->l_icloglock);
1508	xlog_cil_cleanup_whiteouts(&whiteouts);
1509	xfs_log_ticket_ungrant(log, ticket);
1510	memalloc_nofs_restore(nofs_flags);
1511	return;
1512
1513out_skip:
1514	up_write(&cil->xc_ctx_lock);
1515	xfs_log_ticket_put(new_ctx->ticket);
1516	kfree(new_ctx);
1517	memalloc_nofs_restore(nofs_flags);
1518	return;
1519
1520out_abort_free_ticket:
1521	ASSERT(xlog_is_shutdown(log));
1522	xlog_cil_cleanup_whiteouts(&whiteouts);
1523	if (!ctx->commit_iclog) {
1524		xfs_log_ticket_ungrant(log, ctx->ticket);
1525		xlog_cil_committed(ctx);
1526		memalloc_nofs_restore(nofs_flags);
1527		return;
1528	}
1529	spin_lock(&log->l_icloglock);
1530	ticket = ctx->ticket;
1531	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1532	/* Not safe to reference ctx now! */
1533	spin_unlock(&log->l_icloglock);
1534	xfs_log_ticket_ungrant(log, ticket);
1535	memalloc_nofs_restore(nofs_flags);
1536}
1537
1538/*
1539 * We need to push the CIL every so often so we don't cache more than we can fit in
1540 * the log. The limit really is that a checkpoint can't be more than half the
1541 * log (the current checkpoint is not allowed to overwrite the previous
1542 * checkpoint), but commit latency and memory usage limit this to a smaller
1543 * size.
1544 */
1545static void
1546xlog_cil_push_background(
1547	struct xlog	*log)
1548{
1549	struct xfs_cil	*cil = log->l_cilp;
1550	int		space_used = atomic_read(&cil->xc_ctx->space_used);
1551
1552	/*
1553	 * The cil won't be empty because we are called while holding the
1554	 * context lock so whatever we added to the CIL will still be there.
1555	 */
1556	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1557
1558	/*
1559	 * We are done if:
1560	 * - we haven't used up all the space available yet; or
1561	 * - we've already queued up a push; and
1562	 * - we're not over the hard limit; and
1563	 * - nothing has been over the hard limit.
1564	 *
1565	 * If so, we don't need to take the push lock as there's nothing to do.
1566	 */
1567	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
1568	    (cil->xc_push_seq == cil->xc_current_sequence &&
1569	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
1570	     !waitqueue_active(&cil->xc_push_wait))) {
1571		up_read(&cil->xc_ctx_lock);
1572		return;
1573	}
1574
1575	spin_lock(&cil->xc_push_lock);
1576	if (cil->xc_push_seq < cil->xc_current_sequence) {
1577		cil->xc_push_seq = cil->xc_current_sequence;
1578		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1579	}
1580
1581	/*
1582	 * Drop the context lock now, we can't hold that if we need to sleep
1583	 * because we are over the blocking threshold. The push_lock is still
1584	 * held, so blocking threshold sleep/wakeup is still correctly
1585	 * serialised here.
1586	 */
1587	up_read(&cil->xc_ctx_lock);
1588
1589	/*
1590	 * If we are well over the space limit, throttle the work that is being
1591	 * done until the push work on this context has begun. Enforce the hard
1592	 * throttle on all transaction commits once it has been activated, even
1593	 * if the committing transactions have resulted in the space usage
1594	 * dipping back down under the hard limit.
1595	 *
1596	 * The cil->xc_push_lock provides the serialisation necessary for safely
1597	 * calling xlog_cil_over_hard_limit() in this context.
1598	 */
1599	if (xlog_cil_over_hard_limit(log, space_used)) {
1600		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1601		ASSERT(space_used < log->l_logsize);
1602		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1603		return;
1604	}
1605
1606	spin_unlock(&cil->xc_push_lock);
1607
1608}
1609
1610/*
1611 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1612 * number that is passed. When it returns, the work will be queued for
1613 * @push_seq, but it won't be completed.
1614 *
1615 * If the caller is performing a synchronous force, we will flush the workqueue
1616 * to get previously queued work moving and so minimise the time the caller
1617 * spends waiting for all outstanding pushes to complete. The caller is
1618 * expected to do the required waiting for push_seq to complete.
1619 *
1620 * If the caller is performing an async push, we need to ensure that the
1621 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
1622 * don't do this, then the commit record may remain sitting in memory in an
1623 * ACTIVE iclog. This then requires another full log force to push to disk,
1624 * which defeats the purpose of having an async, non-blocking CIL force
1625 * mechanism. Hence in this case we need to pass a flag to the push work to
1626 * indicate it needs to flush the commit record itself.
1627 */
1628static void
1629xlog_cil_push_now(
1630	struct xlog	*log,
1631	xfs_lsn_t	push_seq,
1632	bool		async)
1633{
1634	struct xfs_cil	*cil = log->l_cilp;
1635
1636	if (!cil)
1637		return;
1638
1639	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1640
1641	/* start on any pending background push to minimise wait time on it */
1642	if (!async)
1643		flush_workqueue(cil->xc_push_wq);
1644
1645	spin_lock(&cil->xc_push_lock);
1646
1647	/*
1648	 * If this is an async flush request, we always need to set the
1649	 * xc_push_commit_stable flag even if something else has already queued
1650	 * a push. The flush caller is asking for the CIL to be on stable
1651	 * storage when the next push completes, so regardless of who has queued
1652	 * the push, the flush requires stable semantics from it.
1653	 */
1654	cil->xc_push_commit_stable = async;
1655
1656	/*
1657	 * If the CIL is empty or we've already pushed the sequence then
1658	 * there's no more work that we need to do.
1659	 */
1660	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
1661	    push_seq <= cil->xc_push_seq) {
1662		spin_unlock(&cil->xc_push_lock);
1663		return;
1664	}
1665
1666	cil->xc_push_seq = push_seq;
1667	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1668	spin_unlock(&cil->xc_push_lock);
1669}
1670
1671bool
1672xlog_cil_empty(
1673	struct xlog	*log)
1674{
1675	struct xfs_cil	*cil = log->l_cilp;
1676	bool		empty = false;
1677
1678	spin_lock(&cil->xc_push_lock);
1679	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1680		empty = true;
1681	spin_unlock(&cil->xc_push_lock);
1682	return empty;
1683}
1684
1685/*
1686 * If there are intent done items in this transaction and the related intent was
1687 * committed in the current (same) CIL checkpoint, we don't need to write either
1688 * the intent or intent done item to the journal as the change will be
1689 * journalled atomically within this checkpoint. As we cannot remove items from
1690 * the CIL here, mark the related intent with a whiteout so that the CIL push
1691 * can remove it rather than writing it to the journal. Then remove the intent
1692 * done item from the current transaction and release it so it doesn't get put
1693 * into the CIL at all.
1694 */
1695static uint32_t
1696xlog_cil_process_intents(
1697	struct xfs_cil		*cil,
1698	struct xfs_trans	*tp)
1699{
1700	struct xfs_log_item	*lip, *ilip, *next;
1701	uint32_t		len = 0;
1702
1703	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1704		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1705			continue;
1706
1707		ilip = lip->li_ops->iop_intent(lip);
1708		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1709			continue;
1710		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1711		trace_xfs_cil_whiteout_mark(ilip);
1712		len += ilip->li_lv->lv_bytes;
1713		kvfree(ilip->li_lv);
1714		ilip->li_lv = NULL;
1715
1716		xfs_trans_del_item(lip);
1717		lip->li_ops->iop_release(lip);
1718	}
1719	return len;
1720}
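/*
 * Example (editor's illustration): if a transaction carries an EFD whose
 * matching EFI is still in the current checkpoint, neither item needs to
 * reach the journal. The EFI is marked XFS_LI_WHITEOUT for the push to
 * discard, its formatted lv is freed here, and the EFD is removed from the
 * transaction and released. The EFI's lv_bytes are returned as
 * released_space so the CIL space accounting shrinks to match.
 */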
1721
1722/*
1723 * Commit a transaction with the given vector to the Committed Item List.
1724 *
1725 * To do this, we need to format the item, pin it in memory if required and
1726 * account for the space used by the transaction. Once we have done that we
1727 * need to release the unused reservation for the transaction, attach the
1728 * transaction to the checkpoint context so we carry the busy extents through
1729 * to checkpoint completion, and then unlock all the items in the transaction.
1730 *
1731 * Called with the context lock already held in read mode to lock out
1732 * background commit, returns without it held once background commits are
1733 * allowed again.
1734 */
1735void
1736xlog_cil_commit(
1737	struct xlog		*log,
1738	struct xfs_trans	*tp,
1739	xfs_csn_t		*commit_seq,
1740	bool			regrant)
1741{
1742	struct xfs_cil		*cil = log->l_cilp;
1743	struct xfs_log_item	*lip, *next;
1744	uint32_t		released_space = 0;
1745
1746	/*
1747	 * Do all necessary memory allocation before we lock the CIL.
1748	 * This ensures the allocation does not deadlock with a CIL
1749	 * push in memory reclaim (e.g. from kswapd).
1750	 */
1751	xlog_cil_alloc_shadow_bufs(log, tp);
1752
1753	/* lock out background commit */
1754	down_read(&cil->xc_ctx_lock);
1755
1756	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1757		released_space = xlog_cil_process_intents(cil, tp);
1758
1759	xlog_cil_insert_items(log, tp, released_space);
1760
1761	if (regrant && !xlog_is_shutdown(log))
1762		xfs_log_ticket_regrant(log, tp->t_ticket);
1763	else
1764		xfs_log_ticket_ungrant(log, tp->t_ticket);
1765	tp->t_ticket = NULL;
1766	xfs_trans_unreserve_and_mod_sb(tp);
1767
1768	/*
1769	 * Once all the items of the transaction have been copied to the CIL,
1770	 * the items can be unlocked and possibly freed.
1771	 *
1772	 * This needs to be done before we drop the CIL context lock because we
1773	 * have to update state in the log items and unlock them before they go
1774	 * to disk. If we don't, then the CIL checkpoint can race with us and
1775	 * we can run checkpoint completion before we've updated and unlocked
1776	 * the log items. This affects (at least) processing of stale buffers,
1777	 * inodes and EFIs.
1778	 */
1779	trace_xfs_trans_commit_items(tp, _RET_IP_);
1780	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1781		xfs_trans_del_item(lip);
1782		if (lip->li_ops->iop_committing)
1783			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1784	}
1785	if (commit_seq)
1786		*commit_seq = cil->xc_ctx->sequence;
1787
1788	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
1789	xlog_cil_push_background(log);
1790}
1791
1792/*
1793 * Flush the CIL to stable storage but don't wait for it to complete. This
1794 * requires the CIL push to ensure the commit record for the push hits the disk,
1795 * but otherwise is no different to a push done from a log force.
1796 */
1797void
1798xlog_cil_flush(
1799	struct xlog	*log)
1800{
1801	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;
1802
1803	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1804	xlog_cil_push_now(log, seq, true);
1805
1806	/*
1807	 * If the CIL is empty, make sure that any previous checkpoint that may
1808	 * still be in an active iclog is pushed to stable storage.
1809	 */
1810	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1811		xfs_log_force(log->l_mp, 0);
1812}
1813
1814/*
1815 * Conditionally push the CIL based on the sequence passed in.
1816 *
1817 * We only need to push if we haven't already pushed the sequence number given.
1818 * Hence the only time we will trigger a push here is if the push sequence is
1819 * the same as the current context.
1820 *
1821 * We return the current commit lsn to allow the callers to determine if an
1822 * iclog flush is necessary following this call.
1823 */
1824xfs_lsn_t
1825xlog_cil_force_seq(
1826	struct xlog	*log,
1827	xfs_csn_t	sequence)
1828{
1829	struct xfs_cil		*cil = log->l_cilp;
1830	struct xfs_cil_ctx	*ctx;
1831	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
1832
1833	ASSERT(sequence <= cil->xc_current_sequence);
1834
1835	if (!sequence)
1836		sequence = cil->xc_current_sequence;
1837	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1838
1839	/*
1840	 * check to see if we need to force out the current context.
1841	 * xlog_cil_push_now() handles racing pushes for the same sequence,
1842	 * so no need to deal with it here.
1843	 */
1844restart:
1845	xlog_cil_push_now(log, sequence, false);
1846
1847	/*
1848	 * See if we can find a previous sequence still committing.
1849	 * We need to wait for all previous sequence commits to complete
1850	 * before allowing the force of push_seq to go ahead. Hence block
1851	 * on commits for those as well.
1852	 */
1853	spin_lock(&cil->xc_push_lock);
1854	list_for_each_entry(ctx, &cil->xc_committing, committing) {
1855		/*
1856		 * Avoid getting stuck in this loop because we were woken by the
1857		 * shutdown, but then went back to sleep once already in the
1858		 * shutdown state.
1859		 */
1860		if (xlog_is_shutdown(log))
1861			goto out_shutdown;
1862		if (ctx->sequence > sequence)
1863			continue;
1864		if (!ctx->commit_lsn) {
1865			/*
1866			 * It is still being pushed! Wait for the push to
1867			 * complete, then start again from the beginning.
1868			 */
1869			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1870			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1871			goto restart;
1872		}
1873		if (ctx->sequence != sequence)
1874			continue;
1875		/* found it! */
1876		commit_lsn = ctx->commit_lsn;
1877	}
1878
1879	/*
1880	 * The call to xlog_cil_push_now() executes the push in the background.
1881 * Hence by the time we have got here, our sequence may not have been
1882	 * pushed yet. This is true if the current sequence still matches the
1883	 * push sequence after the above wait loop and the CIL still contains
1884	 * dirty objects. This is guaranteed by the push code first adding the
1885	 * context to the committing list before emptying the CIL.
1886	 *
1887	 * Hence if we don't find the context in the committing list and the
1888	 * current sequence number is unchanged then the CIL contents are
1889 * significant.  If the CIL is empty, it means there was nothing to push
1890	 * and that means there is nothing to wait for. If the CIL is not empty,
1891	 * it means we haven't yet started the push, because if it had started
1892	 * we would have found the context on the committing list.
1893	 */
1894	if (sequence == cil->xc_current_sequence &&
1895	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1896		spin_unlock(&cil->xc_push_lock);
1897		goto restart;
1898	}
1899
1900	spin_unlock(&cil->xc_push_lock);
1901	return commit_lsn;
1902
1903	/*
1904	 * We detected a shutdown in progress. We need to trigger the log force
1905 * to pass through its iclog state machine error handling, even though
1906	 * we are already in a shutdown state. Hence we can't return
1907	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1908	 * LSN is already stable), so we return a zero LSN instead.
1909	 */
1910out_shutdown:
1911	spin_unlock(&cil->xc_push_lock);
1912	return 0;
1913}
1914
1915/*
1916 * Perform initial CIL structure initialisation.
1917 */
1918int
1919xlog_cil_init(
1920	struct xlog		*log)
1921{
1922	struct xfs_cil		*cil;
1923	struct xfs_cil_ctx	*ctx;
1924	struct xlog_cil_pcp	*cilpcp;
1925	int			cpu;
1926
1927	cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1928	if (!cil)
1929		return -ENOMEM;
1930	/*
1931	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
1932	 * concurrency the log spinlocks will be exposed to.
1933	 */
1934	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1935			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1936			4, log->l_mp->m_super->s_id);
1937	if (!cil->xc_push_wq)
1938		goto out_destroy_cil;
1939
1940	cil->xc_log = log;
1941	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1942	if (!cil->xc_pcp)
1943		goto out_destroy_wq;
1944
1945	for_each_possible_cpu(cpu) {
1946		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1947		INIT_LIST_HEAD(&cilpcp->busy_extents);
1948		INIT_LIST_HEAD(&cilpcp->log_items);
1949	}
1950
1951	INIT_LIST_HEAD(&cil->xc_committing);
1952	spin_lock_init(&cil->xc_push_lock);
1953	init_waitqueue_head(&cil->xc_push_wait);
1954	init_rwsem(&cil->xc_ctx_lock);
1955	init_waitqueue_head(&cil->xc_start_wait);
1956	init_waitqueue_head(&cil->xc_commit_wait);
1957	log->l_cilp = cil;
1958
1959	ctx = xlog_cil_ctx_alloc();
1960	xlog_cil_ctx_switch(cil, ctx);
1961	return 0;
1962
1963out_destroy_wq:
1964	destroy_workqueue(cil->xc_push_wq);
1965out_destroy_cil:
1966	kfree(cil);
1967	return -ENOMEM;
1968}
1969
1970void
1971xlog_cil_destroy(
1972	struct xlog	*log)
1973{
1974	struct xfs_cil	*cil = log->l_cilp;
1975
1976	if (cil->xc_ctx) {
1977		if (cil->xc_ctx->ticket)
1978			xfs_log_ticket_put(cil->xc_ctx->ticket);
1979		kfree(cil->xc_ctx);
1980	}
1981
1982	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1983	free_percpu(cil->xc_pcp);
1984	destroy_workqueue(cil->xc_push_wq);
1985	kfree(cil);
1986}
1987