   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
   4 */
   5
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_format.h"
   9#include "xfs_log_format.h"
  10#include "xfs_shared.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_extent_busy.h"
  14#include "xfs_trans.h"
  15#include "xfs_trans_priv.h"
  16#include "xfs_log.h"
  17#include "xfs_log_priv.h"
  18#include "xfs_trace.h"
  19#include "xfs_discard.h"
  20
  21/*
  22 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
  23 * recover, so we don't allow failure here. Also, we allocate in a context that
  24 * we don't want to be issuing transactions from, so we need to tell the
  25 * allocation code this as well.
  26 *
  27 * We don't reserve any space for the ticket - we are going to steal whatever
  28 * space we require from transactions as they commit. To ensure we reserve all
  29 * the space required, we need to set the current reservation of the ticket to
  30 * zero so that we know to steal the initial transaction overhead from the
  31 * first transaction commit.
  32 */
  33static struct xlog_ticket *
  34xlog_cil_ticket_alloc(
  35	struct xlog	*log)
  36{
  37	struct xlog_ticket *tic;
  38
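	/*
	 * Zero unit reservation, a count of one, non-permanent ticket: the
	 * CIL steals the space it needs from transactions as they commit.
	 */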
  39	tic = xlog_ticket_alloc(log, 0, 1, 0);
  40
  41	/*
  42	 * set the current reservation to zero so we know to steal the basic
  43	 * transaction overhead reservation from the first transaction commit.
  44	 */
  45	tic->t_curr_res = 0;
  46	tic->t_iclog_hdrs = 0;
  47	return tic;
  48}
  49
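/*
 * Estimate the number of iclog headers a full CIL checkpoint may consume: the
 * blocking space limit divided by the payload each iclog can carry once its
 * own header is subtracted. As an illustrative example, assuming a 32k iclog
 * buffer with a 512 byte header, that is one header per ~32256 bytes of CIL
 * space.
 */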
  50static inline void
  51xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
  52{
  53	struct xlog	*log = cil->xc_log;
  54
  55	atomic_set(&cil->xc_iclog_hdrs,
  56		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
  57			(log->l_iclog_size - log->l_iclog_hsize)));
  58}
  59
  60/*
  61 * Check if the current log item was first committed in this sequence.
  62 * We can't rely on just the log item being in the CIL, we have to check
  63 * the recorded commit sequence number.
  64 *
  65 * Note: for this to be used in a non-racy manner, it has to be called with
  66 * CIL flushing locked out. As a result, it should only be used during the
  67 * transaction commit process when deciding what to format into the item.
  68 */
  69static bool
  70xlog_item_in_current_chkpt(
  71	struct xfs_cil		*cil,
  72	struct xfs_log_item	*lip)
  73{
  74	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
  75		return false;
  76
  77	/*
  78	 * li_seq is written on the first commit of a log item to record the
  79	 * first checkpoint it is written to. Hence if it is different to the
  80	 * current sequence, we're in a new checkpoint.
  81	 */
  82	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
  83}
  84
  85bool
  86xfs_log_item_in_current_chkpt(
  87	struct xfs_log_item *lip)
  88{
  89	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
  90}
  91
  92/*
  93 * Unavoidable forward declaration - xlog_cil_push_work() calls
  94 * xlog_cil_ctx_alloc() itself.
  95 */
  96static void xlog_cil_push_work(struct work_struct *work);
  97
  98static struct xfs_cil_ctx *
  99xlog_cil_ctx_alloc(void)
 100{
 101	struct xfs_cil_ctx	*ctx;
 102
 103	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
 104	INIT_LIST_HEAD(&ctx->committing);
 105	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
 106	INIT_LIST_HEAD(&ctx->log_items);
 107	INIT_LIST_HEAD(&ctx->lv_chain);
 108	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
 109	return ctx;
 110}
 111
 112/*
 113 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 114 * clear the percpu state ready for the next context to use. This is called
 115 * from the push code with the context lock held exclusively, hence nothing else
 116 * will be accessing or modifying the per-cpu counters.
 117 */
 118static void
 119xlog_cil_push_pcp_aggregate(
 120	struct xfs_cil		*cil,
 121	struct xfs_cil_ctx	*ctx)
 122{
 123	struct xlog_cil_pcp	*cilpcp;
 124	int			cpu;
 125
 126	for_each_cpu(cpu, &ctx->cil_pcpmask) {
 127		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
 128
 129		ctx->ticket->t_curr_res += cilpcp->space_reserved;
 130		cilpcp->space_reserved = 0;
 131
 132		if (!list_empty(&cilpcp->busy_extents)) {
 133			list_splice_init(&cilpcp->busy_extents,
 134					&ctx->busy_extents.extent_list);
 135		}
 136		if (!list_empty(&cilpcp->log_items))
 137			list_splice_init(&cilpcp->log_items, &ctx->log_items);
 138
 139		/*
 140		 * We're in the middle of switching cil contexts.  Reset the
 141		 * counter we use to detect when the current context is nearing
 142		 * full.
 143		 */
 144		cilpcp->space_used = 0;
 145	}
 146}
 147
 148/*
 149 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 150 * This is called when the per-cpu counter aggregation will first pass the soft
 151 * limit threshold so we can switch to atomic counter aggregation for accurate
 152 * detection of hard limit traversal.
 153 */
 154static void
 155xlog_cil_insert_pcp_aggregate(
 156	struct xfs_cil		*cil,
 157	struct xfs_cil_ctx	*ctx)
 158{
 159	struct xlog_cil_pcp	*cilpcp;
 160	int			cpu;
 161	int			count = 0;
 162
 163	/* Trigger atomic updates then aggregate only for the first caller */
 164	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
 165		return;
 166
 167	/*
 168	 * We can race with other cpus setting cil_pcpmask.  However, we've
 169	 * atomically cleared PCP_SPACE which forces other threads to add to
 170	 * the global space used count.  cil_pcpmask is a superset of cilpcp
 171	 * structures that could have a nonzero space_used.
 172	 */
 173	for_each_cpu(cpu, &ctx->cil_pcpmask) {
 174		int	old, prev;
 175
 176		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
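		/*
		 * Zero the percpu counter with cmpxchg, retrying if a
		 * concurrent commit modified it between the read and the
		 * exchange, so that no space accounting is lost.
		 */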
 177		do {
 178			old = cilpcp->space_used;
 179			prev = cmpxchg(&cilpcp->space_used, old, 0);
 180		} while (old != prev);
 181		count += old;
 182	}
 183	atomic_add(count, &ctx->space_used);
 184}
 185
 186static void
 187xlog_cil_ctx_switch(
 188	struct xfs_cil		*cil,
 189	struct xfs_cil_ctx	*ctx)
 190{
 191	xlog_cil_set_iclog_hdr_count(cil);
 192	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
 193	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
 194	ctx->sequence = ++cil->xc_current_sequence;
 195	ctx->cil = cil;
 196	cil->xc_ctx = ctx;
 197}
 198
 199/*
 200 * After the first stage of log recovery is done, we know where the head and
 201 * tail of the log are. We need this log initialisation done before we can
 202 * initialise the first CIL checkpoint context.
 203 *
 204 * Here we allocate a log ticket to track space usage during a CIL push.  This
 205 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 206 * space by failing to account for space used by log headers and additional
 207 * region headers for split regions.
 208 */
 209void
 210xlog_cil_init_post_recovery(
 211	struct xlog	*log)
 212{
 213	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
 214	log->l_cilp->xc_ctx->sequence = 1;
 215	xlog_cil_set_iclog_hdr_count(log->l_cilp);
 216}
 217
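/*
 * Space needed at the head of a log vector buffer for the xfs_log_vec header
 * and its iovec array, rounded up so that the data region which follows it
 * starts 64-bit aligned.
 */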
 218static inline int
 219xlog_cil_iovec_space(
 220	uint	niovecs)
 221{
 222	return round_up((sizeof(struct xfs_log_vec) +
 223					niovecs * sizeof(struct xfs_log_iovec)),
 224			sizeof(uint64_t));
 225}
 226
 227/*
 228 * Allocate or pin log vector buffers for CIL insertion.
 229 *
 230 * The CIL currently uses disposable buffers for copying a snapshot of the
 231 * modified items into the log during a push. The biggest problem with this is
 232 * the requirement to allocate the disposable buffer during the commit if:
 233 *	a) it does not exist; or
 234 *	b) it is too small
 235 *
 236 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 237 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 238 * the memory allocation. This means that we have a potential deadlock situation
 239 * under low memory conditions when we have lots of dirty metadata pinned in
 240 * the CIL and we need a CIL commit to occur to free memory.
 241 *
 242 * To avoid this, we need to move the memory allocation outside the
 243 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 244 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 245 * vector buffers between the check and the formatting of the item into the
 246 * log vector buffer within the xc_ctx_lock.
 247 *
 248 * Because the log vector buffer needs to be unchanged during the CIL push
 249 * process, we cannot share the buffer between the transaction commit (which
 250 * modifies the buffer) and the CIL push context that is writing the changes
 251 * into the log. This means skipping preallocation of buffer space is
 252 * unreliable, but we most definitely do not want to be allocating and freeing
 253 * buffers unnecessarily during commits when overwrites can be done safely.
 254 *
 255 * The simplest solution to this problem is to allocate a shadow buffer when a
 256 * log item is committed for the second time, and then to only use this buffer
 257 * if necessary. The buffer can remain attached to the log item until such time
 258 * as it is needed, and this is the buffer that is reallocated to match the size of
 259 * the incoming modification. Then during the formatting of the item we can swap
 260 * the active buffer with the new one if we can't reuse the existing buffer. We
 261 * don't free the old buffer as it may be reused on the next modification if
 262 * its size is right, otherwise we'll free and reallocate it at that point.
 263 *
 264 * This function builds a vector for the changes in each log item in the
 265 * transaction. It then works out the length of the buffer needed for each log
 266 * item, allocates them and attaches the vector to the log item in preparation
 267 * for the formatting step which occurs under the xc_ctx_lock.
 268 *
 269 * While this means the memory footprint goes up, it avoids the repeated
 270 * alloc/free pattern that repeated modifications of an item would otherwise
 271 * cause, and hence minimises the CPU overhead of such behaviour.
 272 */
 273static void
 274xlog_cil_alloc_shadow_bufs(
 275	struct xlog		*log,
 276	struct xfs_trans	*tp)
 277{
 278	struct xfs_log_item	*lip;
 279
 280	list_for_each_entry(lip, &tp->t_items, li_trans) {
 281		struct xfs_log_vec *lv;
 282		int	niovecs = 0;
 283		int	nbytes = 0;
 284		int	buf_size;
 285		bool	ordered = false;
 286
 287		/* Skip items which aren't dirty in this transaction. */
 288		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 289			continue;
 290
 291		/* get number of vecs and size of data to be stored */
 292		lip->li_ops->iop_size(lip, &niovecs, &nbytes);
 293
 294		/*
 295		 * Ordered items need to be tracked but we do not wish to write
 296		 * them. We need a logvec to track the object, but we do not
 297		 * need an iovec or buffer to be allocated for copying data.
 298		 */
 299		if (niovecs == XFS_LOG_VEC_ORDERED) {
 300			ordered = true;
 301			niovecs = 0;
 302			nbytes = 0;
 303		}
 304
 305		/*
 306		 * We 64-bit align the length of each iovec so that the start of
 307		 * the next one is naturally aligned.  We'll need to account for
 308		 * that slack space here.
 309		 *
 310		 * We also add the xlog_op_header to each region when
 311		 * formatting, but that's not accounted to the size of the item
 312	 * at this point. Hence we'll need an additional number of bytes
 313		 * for each vector to hold an opheader.
 314		 *
 315		 * Then round nbytes up to 64-bit alignment so that the initial
 316		 * buffer alignment is easy to calculate and verify.
 317		 */
 318		nbytes += niovecs *
 319			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
 320		nbytes = round_up(nbytes, sizeof(uint64_t));
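		/*
		 * Illustrative example (hypothetical sizes): a 100 byte item
		 * with two iovecs, assuming a 12 byte xlog_op_header, becomes
		 * 100 + 2 * (8 + 12) = 140 bytes, rounded up to 144.
		 */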
 321
 322		/*
 323		 * The data buffer needs to start 64-bit aligned, so round up
 324		 * that space to ensure we can align it appropriately and not
 325		 * overrun the buffer.
 326		 */
 327		buf_size = nbytes + xlog_cil_iovec_space(niovecs);
 328
 329		/*
 330		 * if we have no shadow buffer, or it is too small, we need to
 331		 * reallocate it.
 332		 */
 333		if (!lip->li_lv_shadow ||
 334		    buf_size > lip->li_lv_shadow->lv_size) {
 335			/*
 336			 * We free and allocate here as a realloc would copy
 337			 * unnecessary data. We don't use kvzalloc() for the
 338			 * same reason - we don't need to zero the data area in
 339			 * the buffer, only the log vector header and the iovec
 340			 * storage.
 341			 */
 342			kvfree(lip->li_lv_shadow);
 343			lv = xlog_kvmalloc(buf_size);
 344
 345			memset(lv, 0, xlog_cil_iovec_space(niovecs));
 346
 347			INIT_LIST_HEAD(&lv->lv_list);
 348			lv->lv_item = lip;
 349			lv->lv_size = buf_size;
 350			if (ordered)
 351				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 352			else
 353				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
 354			lip->li_lv_shadow = lv;
 355		} else {
 356			/* same or smaller, optimise common overwrite case */
 357			lv = lip->li_lv_shadow;
 358			if (ordered)
 359				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 360			else
 361				lv->lv_buf_len = 0;
 362			lv->lv_bytes = 0;
 363		}
 364
 365		/* Ensure the lv is set up according to ->iop_size */
 366		lv->lv_niovecs = niovecs;
 367
 368		/* The allocated data region lies beyond the iovec region */
 369		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
 370	}
 371
 372}
 373
 374/*
 375 * Prepare the log item for insertion into the CIL. Calculate the difference in
 376 * log space it will consume, and if it is a new item pin it as well.
 377 */
 378STATIC void
 379xfs_cil_prepare_item(
 380	struct xlog		*log,
 381	struct xfs_log_vec	*lv,
 382	struct xfs_log_vec	*old_lv,
 383	int			*diff_len)
 384{
 385	/* Account for the new LV being passed in */
 386	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
 387		*diff_len += lv->lv_bytes;
 388
 389	/*
 390	 * If there is no old LV, this is the first time we've seen the item in
 391	 * this CIL context and so we need to pin it. If we are replacing the
 392	 * old_lv, then remove the space it accounts for and make it the shadow
 393	 * buffer for later freeing. In both cases we are now switching to the
 394	 * shadow buffer, so update the pointer to it appropriately.
 395	 */
 396	if (!old_lv) {
 397		if (lv->lv_item->li_ops->iop_pin)
 398			lv->lv_item->li_ops->iop_pin(lv->lv_item);
 399		lv->lv_item->li_lv_shadow = NULL;
 400	} else if (old_lv != lv) {
 401		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
 402
 403		*diff_len -= old_lv->lv_bytes;
 404		lv->lv_item->li_lv_shadow = old_lv;
 405	}
 406
 407	/* attach new log vector to log item */
 408	lv->lv_item->li_lv = lv;
 409
 410	/*
 411	 * If this is the first time the item is being committed to the
 412	 * CIL, store the sequence number on the log item so we can
 413	 * tell in future commits whether this is the first checkpoint
 414	 * the item is being committed into.
 415	 */
 416	if (!lv->lv_item->li_seq)
 417		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
 418}
 419
 420/*
 421 * Format log item into a flat buffer
 422 *
 423 * For delayed logging, we need to hold a formatted buffer containing all the
 424 * changes on the log item. This enables us to relog the item in memory and
 425 * write it out asynchronously without needing to relock the object that was
 426 * modified at the time it gets written into the iclog.
 427 *
 428 * This function takes the prepared log vectors attached to each log item, and
 429 * formats the changes into the log vector buffer. The buffer it uses is
 430 * dependent on the current state of the vector in the CIL - the shadow lv is
 431 * guaranteed to be large enough for the current modification, but we will only
 432 * use that if we can't reuse the existing lv. If we can't reuse the existing
 433 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 434 * done lazily either by the next modification or the freeing of the log item.
 435 *
 436 * We don't set up region headers during this process; we simply copy the
 437 * regions into the flat buffer. We can do this because we still have to do a
 438 * formatting step to write the regions into the iclog buffer.  Writing the
 439 * ophdrs during the iclog write means that we can support splitting large
 440 * regions across iclog boundaries without needing a change in the format of the
 441 * item/region encapsulation.
 442 *
 443 * Hence what we need to do now is rewrite the vector array to point
 444 * to the copied region inside the buffer we just allocated. This allows us to
 445 * format the regions into the iclog as though they are being formatted
 446 * directly out of the objects themselves.
 447 */
 448static void
 449xlog_cil_insert_format_items(
 450	struct xlog		*log,
 451	struct xfs_trans	*tp,
 452	int			*diff_len)
 453{
 454	struct xfs_log_item	*lip;
 455
 456	/* Bail out if we didn't find a log item.  */
 457	if (list_empty(&tp->t_items)) {
 458		ASSERT(0);
 459		return;
 460	}
 461
 462	list_for_each_entry(lip, &tp->t_items, li_trans) {
 463		struct xfs_log_vec *lv;
 464		struct xfs_log_vec *old_lv = NULL;
 465		struct xfs_log_vec *shadow;
 466		bool	ordered = false;
 467
 468		/* Skip items which aren't dirty in this transaction. */
 469		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 470			continue;
 471
 472		/*
 473		 * The formatting size information is already attached to
 474		 * the shadow lv on the log item.
 475		 */
 476		shadow = lip->li_lv_shadow;
 477		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
 478			ordered = true;
 479
 480		/* Skip items that do not have any vectors for writing */
 481		if (!shadow->lv_niovecs && !ordered)
 482			continue;
 483
 484		/* compare to existing item size */
 485		old_lv = lip->li_lv;
 486		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
 487			/* same or smaller, optimise common overwrite case */
 488			lv = lip->li_lv;
 489
 490			if (ordered)
 491				goto insert;
 492
 493			/*
 494			 * set the item up as though it is a new insertion so
 495			 * that the space reservation accounting is correct.
 496			 */
 497			*diff_len -= lv->lv_bytes;
 498
 499			/* Ensure the lv is set up according to ->iop_size */
 500			lv->lv_niovecs = shadow->lv_niovecs;
 501
 502			/* reset the lv buffer information for new formatting */
 503			lv->lv_buf_len = 0;
 504			lv->lv_bytes = 0;
 505			lv->lv_buf = (char *)lv +
 506					xlog_cil_iovec_space(lv->lv_niovecs);
 507		} else {
 508			/* switch to shadow buffer! */
 509			lv = shadow;
 510			lv->lv_item = lip;
 511			if (ordered) {
 512				/* track as an ordered logvec */
 513				ASSERT(lip->li_lv == NULL);
 514				goto insert;
 515			}
 516		}
 517
 518		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
 519		lip->li_ops->iop_format(lip, lv);
 520insert:
 521		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
 522	}
 523}
 524
 525/*
 526 * The use of lockless waitqueue_active() requires that the caller has
 527 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 528 * can be done by either holding the push lock or the context lock.
 529 */
 530static inline bool
 531xlog_cil_over_hard_limit(
 532	struct xlog	*log,
 533	int32_t		space_used)
 534{
 535	if (waitqueue_active(&log->l_cilp->xc_push_wait))
 536		return true;
 537	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
 538		return true;
 539	return false;
 540}
 541
 542/*
 543 * Insert the log items into the CIL and calculate the difference in space
 544 * consumed by the item. Add the space to the checkpoint ticket and calculate
 545 * if the change requires additional log metadata. If it does, take that space
 546 * as well. Remove the amount of space we added to the checkpoint ticket from
 547 * the current transaction ticket so that the accounting works out correctly.
 548 */
 549static void
 550xlog_cil_insert_items(
 551	struct xlog		*log,
 552	struct xfs_trans	*tp,
 553	uint32_t		released_space)
 554{
 555	struct xfs_cil		*cil = log->l_cilp;
 556	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
 557	struct xfs_log_item	*lip;
 558	int			len = 0;
 559	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
 560	int			space_used;
 561	int			order;
 562	unsigned int		cpu_nr;
 563	struct xlog_cil_pcp	*cilpcp;
 564
 565	ASSERT(tp);
 566
 567	/*
 568	 * We can do this safely because the context can't checkpoint until we
 569	 * are done so it doesn't matter exactly how we update the CIL.
 570	 */
 571	xlog_cil_insert_format_items(log, tp, &len);
 572
 573	/*
 574	 * Subtract the space released by intent cancelation from the space we
 575	 * consumed so that we remove it from the CIL space and add it back to
 576	 * the current transaction reservation context.
 577	 */
 578	len -= released_space;
 579
 580	/*
 581	 * Grab the per-cpu pointer for the CIL before we start any accounting.
 582	 * That ensures that we are running with pre-emption disabled and so we
 583	 * can't be scheduled away between split sample/update operations that
 584	 * are done without outside locking to serialise them.
 585	 */
 586	cpu_nr = get_cpu();
 587	cilpcp = this_cpu_ptr(cil->xc_pcp);
 588
 589	/* Tell the future push that there was work added by this CPU. */
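	/*
	 * Testing the bit first avoids dirtying the shared cpumask cacheline
	 * with an atomic RMW when the bit is already set.
	 */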
 590	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
 591		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
 592
 593	/*
 594	 * We need to take the CIL checkpoint unit reservation on the first
 595	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
 596	 * unnecessarily do an atomic op in the fast path here. We can clear the
 597	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
 598	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
 599	 */
 600	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
 601	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
 602		ctx_res = ctx->ticket->t_unit_res;
 603
 604	/*
 605	 * Check if we need to steal iclog headers. atomic_read() is not a
 606	 * locked atomic operation, so we can check the value before we do any
 607	 * real atomic ops in the fast path. If we've already taken the CIL unit
 608	 * reservation from this commit, we've already got one iclog header
 609	 * space reserved so we have to account for that otherwise we risk
 610	 * overrunning the reservation on this ticket.
 611	 *
 612	 * If the CIL is already at the hard limit, we might need more header
 613	 * space than originally reserved. So steal more header space from every
 614	 * commit that occurs once we are over the hard limit to ensure the CIL
 615	 * push won't run out of reservation space.
 616	 *
 617	 * This can steal more than we need, but that's OK.
 618	 *
 619	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
 620	 * calling xlog_cil_over_hard_limit() in this context.
 621	 */
 622	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
 623	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
 624	    xlog_cil_over_hard_limit(log, space_used)) {
 625		split_res = log->l_iclog_hsize +
 626					sizeof(struct xlog_op_header);
 627		if (ctx_res)
 628			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
 629		else
 630			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
 631		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
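		/*
		 * Note that xc_iclog_hdrs may go negative here; further
		 * commits then stop stealing header space unless the hard
		 * limit check above forces it again.
		 */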
 632	}
 633	cilpcp->space_reserved += ctx_res;
 634
 635	/*
 636	 * Accurately account when over the soft limit, otherwise fold the
 637	 * percpu count into the global count if over the per-cpu threshold.
 638	 */
 639	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
 640		atomic_add(len, &ctx->space_used);
 641	} else if (cilpcp->space_used + len >
 642			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
 643		space_used = atomic_add_return(cilpcp->space_used + len,
 644						&ctx->space_used);
 645		cilpcp->space_used = 0;
 646
 647		/*
 648		 * If we just transitioned over the soft limit, we need to
 649		 * transition to the global atomic counter.
 650		 */
 651		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
 652			xlog_cil_insert_pcp_aggregate(cil, ctx);
 653	} else {
 654		cilpcp->space_used += len;
 655	}
 656	/* attach the transaction to the CIL if it has any busy extents */
 657	if (!list_empty(&tp->t_busy))
 658		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
 659
 660	/*
 661	 * Now update the order of everything modified in the transaction
 662	 * and insert items into the CIL if they aren't already there.
 663	 * We do this here so we only need to take the CIL lock once during
 664	 * the transaction commit.
 665	 */
 666	order = atomic_inc_return(&ctx->order_id);
 667	list_for_each_entry(lip, &tp->t_items, li_trans) {
 668		/* Skip items which aren't dirty in this transaction. */
 669		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 670			continue;
 671
 672		lip->li_order_id = order;
 673		if (!list_empty(&lip->li_cil))
 674			continue;
 675		list_add_tail(&lip->li_cil, &cilpcp->log_items);
 676	}
 677	put_cpu();
 678
 679	/*
 680	 * If we've overrun the reservation, dump the tx details before we move
 681	 * the log items. Shutdown is imminent...
 682	 */
 683	tp->t_ticket->t_curr_res -= ctx_res + len;
 684	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
 685		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
 686		xfs_warn(log->l_mp,
 687			 "  log items: %d bytes (iov hdrs: %d bytes)",
 688			 len, iovhdr_res);
 689		xfs_warn(log->l_mp, "  split region headers: %d bytes",
 690			 split_res);
 691		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
 692		xlog_print_trans(tp);
 693		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 694	}
 695}
 696
 697static void
 698xlog_cil_free_logvec(
 699	struct list_head	*lv_chain)
 700{
 701	struct xfs_log_vec	*lv;
 702
 703	while (!list_empty(lv_chain)) {
 704		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
 705		list_del_init(&lv->lv_list);
 706		kvfree(lv);
 707	}
 708}
 709
 710/*
 711 * Mark all items committed and clear busy extents. We free the log vector
 712 * chains in a separate pass so that we unpin the log items as quickly as
 713 * possible.
 714 */
 715static void
 716xlog_cil_committed(
 717	struct xfs_cil_ctx	*ctx)
 718{
 719	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
 720	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);
 721
 722	/*
 723	 * If the I/O failed, we're aborting the commit and already shutdown.
 724	 * Wake any commit waiters before aborting the log items so we don't
 725	 * block async log pushers on callbacks. Async log pushers explicitly do
 726	 * not wait on log force completion because they may be holding locks
 727	 * required to unpin items.
 728	 */
 729	if (abort) {
 730		spin_lock(&ctx->cil->xc_push_lock);
 731		wake_up_all(&ctx->cil->xc_start_wait);
 732		wake_up_all(&ctx->cil->xc_commit_wait);
 733		spin_unlock(&ctx->cil->xc_push_lock);
 734	}
 735
 736	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
 737					ctx->start_lsn, abort);
 738
 739	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
 740	xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
 741			      xfs_has_discard(mp) && !abort);
 742
 743	spin_lock(&ctx->cil->xc_push_lock);
 744	list_del(&ctx->committing);
 745	spin_unlock(&ctx->cil->xc_push_lock);
 746
 747	xlog_cil_free_logvec(&ctx->lv_chain);
 748
 749	if (!list_empty(&ctx->busy_extents.extent_list)) {
 750		ctx->busy_extents.mount = mp;
 751		ctx->busy_extents.owner = ctx;
 752		xfs_discard_extents(mp, &ctx->busy_extents);
 753		return;
 754	}
 755
 756	kfree(ctx);
 757}
 758
 759void
 760xlog_cil_process_committed(
 761	struct list_head	*list)
 762{
 763	struct xfs_cil_ctx	*ctx;
 764
 765	while ((ctx = list_first_entry_or_null(list,
 766			struct xfs_cil_ctx, iclog_entry))) {
 767		list_del(&ctx->iclog_entry);
 768		xlog_cil_committed(ctx);
 769	}
 770}
 771
 772/*
 773 * Record the LSN of the iclog we were just granted space to start writing
 774 * into. If the context doesn't have a start_lsn recorded, then this iclog
 775 * will contain the start record for the checkpoint. Otherwise this write
 776 * contains the commit record for the checkpoint.
 777 */
 778void
 779xlog_cil_set_ctx_write_state(
 780	struct xfs_cil_ctx	*ctx,
 781	struct xlog_in_core	*iclog)
 782{
 783	struct xfs_cil		*cil = ctx->cil;
 784	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 785
 786	ASSERT(!ctx->commit_lsn);
 787	if (!ctx->start_lsn) {
 788		spin_lock(&cil->xc_push_lock);
 789		/*
 790		 * The LSN we need to pass to the log items on transaction
 791		 * commit is the LSN reported by the first log vector write, not
 792		 * the commit lsn. If we use the commit record lsn then we can
 793		 * move the grant write head beyond the tail LSN and overwrite
 794		 * it.
 795		 */
 796		ctx->start_lsn = lsn;
 797		wake_up_all(&cil->xc_start_wait);
 798		spin_unlock(&cil->xc_push_lock);
 799
 800		/*
 801		 * Make sure the metadata we are about to overwrite in the log
 802		 * has been flushed to stable storage before this iclog is
 803		 * issued.
 804		 */
 805		spin_lock(&cil->xc_log->l_icloglock);
 806		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
 807		spin_unlock(&cil->xc_log->l_icloglock);
 808		return;
 809	}
 810
 811	/*
 812	 * Take a reference to the iclog for the context so that we still hold
 813	 * it when xlog_write is done and has released it. This means the
 814	 * context controls when the iclog is released for IO.
 815	 */
 816	atomic_inc(&iclog->ic_refcnt);
 817
 818	/*
 819	 * xlog_state_get_iclog_space() guarantees there is enough space in the
 820	 * iclog for an entire commit record, so we can attach the context
 821	 * callbacks now.  This needs to be done before we make the commit_lsn
 822	 * visible to waiters so that checkpoints with commit records in the
 823	 * same iclog order their IO completion callbacks in the same order that
 824	 * the commit records appear in the iclog.
 825	 */
 826	spin_lock(&cil->xc_log->l_icloglock);
 827	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
 828	spin_unlock(&cil->xc_log->l_icloglock);
 829
 830	/*
 831	 * Now we can record the commit LSN and wake anyone waiting for this
 832	 * sequence to have the ordered commit record assigned to a physical
 833	 * location in the log.
 834	 */
 835	spin_lock(&cil->xc_push_lock);
 836	ctx->commit_iclog = iclog;
 837	ctx->commit_lsn = lsn;
 838	wake_up_all(&cil->xc_commit_wait);
 839	spin_unlock(&cil->xc_push_lock);
 840}
 841
 842
 843/*
 844 * Ensure that the order of log writes follows checkpoint sequence order. This
 845 * relies on the context LSN being zero until the log write has guaranteed the
 846 * LSN that the log write will start at via xlog_state_get_iclog_space().
 847 */
 848enum _record_type {
 849	_START_RECORD,
 850	_COMMIT_RECORD,
 851};
 852
 853static int
 854xlog_cil_order_write(
 855	struct xfs_cil		*cil,
 856	xfs_csn_t		sequence,
 857	enum _record_type	record)
 858{
 859	struct xfs_cil_ctx	*ctx;
 860
 861restart:
 862	spin_lock(&cil->xc_push_lock);
 863	list_for_each_entry(ctx, &cil->xc_committing, committing) {
 864		/*
 865		 * Avoid getting stuck in this loop because we were woken by the
 866		 * shutdown, but then went back to sleep once already in the
 867		 * shutdown state.
 868		 */
 869		if (xlog_is_shutdown(cil->xc_log)) {
 870			spin_unlock(&cil->xc_push_lock);
 871			return -EIO;
 872		}
 873
 874		/*
 875		 * Higher sequences will wait for this one so skip them.
 876		 * Don't wait for our own sequence, either.
 877		 */
 878		if (ctx->sequence >= sequence)
 879			continue;
 880
 881		/* Wait until the LSN for the record has been recorded. */
 882		switch (record) {
 883		case _START_RECORD:
 884			if (!ctx->start_lsn) {
 885				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
 886				goto restart;
 887			}
 888			break;
 889		case _COMMIT_RECORD:
 890			if (!ctx->commit_lsn) {
 891				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
 892				goto restart;
 893			}
 894			break;
 895		}
 896	}
 897	spin_unlock(&cil->xc_push_lock);
 898	return 0;
 899}
 900
 901/*
 902 * Write out the log vector change now attached to the CIL context. This will
 903 * write a start record that needs to be strictly ordered in ascending CIL
 904 * sequence order so that log recovery will always use in-order start LSNs when
 905 * replaying checkpoints.
 906 */
 907static int
 908xlog_cil_write_chain(
 909	struct xfs_cil_ctx	*ctx,
 910	uint32_t		chain_len)
 911{
 912	struct xlog		*log = ctx->cil->xc_log;
 913	int			error;
 914
 915	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
 916	if (error)
 917		return error;
 918	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
 919}
 920
 921/*
 922 * Write out the commit record of a checkpoint transaction to close off a
 923 * running log write. These commit records are strictly ordered in ascending CIL
 924 * sequence order so that log recovery will always replay the checkpoints in the
 925 * correct order.
 926 */
 927static int
 928xlog_cil_write_commit_record(
 929	struct xfs_cil_ctx	*ctx)
 930{
 931	struct xlog		*log = ctx->cil->xc_log;
 932	struct xlog_op_header	ophdr = {
 933		.oh_clientid = XFS_TRANSACTION,
 934		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
 935		.oh_flags = XLOG_COMMIT_TRANS,
 936	};
 937	struct xfs_log_iovec	reg = {
 938		.i_addr = &ophdr,
 939		.i_len = sizeof(struct xlog_op_header),
 940		.i_type = XLOG_REG_TYPE_COMMIT,
 941	};
 942	struct xfs_log_vec	vec = {
 943		.lv_niovecs = 1,
 944		.lv_iovecp = &reg,
 945	};
 946	int			error;
 947	LIST_HEAD(lv_chain);
 948	list_add(&vec.lv_list, &lv_chain);
 949
 950	if (xlog_is_shutdown(log))
 951		return -EIO;
 952
 953	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
 954	if (error)
 955		return error;
 956
 957	/* account for space used by record data */
 958	ctx->ticket->t_curr_res -= reg.i_len;
 959	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
 960	if (error)
 961		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
 962	return error;
 963}
 964
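/*
 * A combined start record and checkpoint transaction header, along with the
 * iovecs that describe the two regions to xlog_write(). It is built once per
 * checkpoint by xlog_cil_build_trans_hdr() below.
 */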
 965struct xlog_cil_trans_hdr {
 966	struct xlog_op_header	oph[2];
 967	struct xfs_trans_header	thdr;
 968	struct xfs_log_iovec	lhdr[2];
 969};
 970
 971/*
 972 * Build a checkpoint transaction header to begin the journal transaction.  We
 973 * need to account for the space used by the transaction header here as it is
 974 * not accounted for in xlog_write().
 975 *
 976 * This is the only place we write a transaction header, so we also build the
 977 * log opheaders that indicate the start of a log transaction and wrap the
 978 * transaction header. We keep the start record in its own log vector rather
 979 * than compacting them into a single region as this ends up making the logic
 980 * in xlog_write() for handling empty opheaders for start, commit and unmount
 981 * records much simpler.
 982 */
 983static void
 984xlog_cil_build_trans_hdr(
 985	struct xfs_cil_ctx	*ctx,
 986	struct xlog_cil_trans_hdr *hdr,
 987	struct xfs_log_vec	*lvhdr,
 988	int			num_iovecs)
 989{
 990	struct xlog_ticket	*tic = ctx->ticket;
 991	__be32			tid = cpu_to_be32(tic->t_tid);
 992
 993	memset(hdr, 0, sizeof(*hdr));
 994
 995	/* Log start record */
 996	hdr->oph[0].oh_tid = tid;
 997	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
 998	hdr->oph[0].oh_flags = XLOG_START_TRANS;
 999
1000	/* log iovec region pointer */
1001	hdr->lhdr[0].i_addr = &hdr->oph[0];
1002	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
1003	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;
1004
1005	/* log opheader */
1006	hdr->oph[1].oh_tid = tid;
1007	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
1008	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));
1009
1010	/* transaction header in host byte order format */
1011	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
1012	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
1013	hdr->thdr.th_tid = tic->t_tid;
1014	hdr->thdr.th_num_items = num_iovecs;
1015
1016	/* log iovec region pointer */
1017	hdr->lhdr[1].i_addr = &hdr->oph[1];
1018	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
1019				sizeof(struct xfs_trans_header);
1020	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;
1021
1022	lvhdr->lv_niovecs = 2;
1023	lvhdr->lv_iovecp = &hdr->lhdr[0];
1024	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
1025
1026	tic->t_curr_res -= lvhdr->lv_bytes;
1027}
1028
1029/*
1030 * CIL item reordering compare function. We want to order in ascending ID order,
1031 * but we want to leave items with the same ID in the order they were added to
1032 * the list. This is important for operations like reflink where we log 4
1033 * order-dependent intents in a single transaction when we overwrite an
1034 * existing shared extent with a new one: BUI(unmap), CUI(drop), CUI(inc),
1035 * BUI(remap). list_sort() is stable, so equal IDs stay in insertion order.
1036 */
1037static int
1038xlog_cil_order_cmp(
1039	void			*priv,
1040	const struct list_head	*a,
1041	const struct list_head	*b)
1042{
1043	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
1044	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);
1045
1046	return l1->lv_order_id > l2->lv_order_id;
1047}
1048
1049/*
1050 * Pull all the log vectors off the items in the CIL, and remove the items from
1051 * the CIL. We don't need the CIL lock here because it's only needed on the
1052 * transaction commit side which is currently locked out by the flush lock.
1053 *
1054 * If a log item is marked with a whiteout, we do not need to write it to the
1055 * journal and so we just move them to the whiteout list for the caller to
1056 * dispose of appropriately.
1057 */
1058static void
1059xlog_cil_build_lv_chain(
1060	struct xfs_cil_ctx	*ctx,
1061	struct list_head	*whiteouts,
1062	uint32_t		*num_iovecs,
1063	uint32_t		*num_bytes)
1064{
1065	while (!list_empty(&ctx->log_items)) {
1066		struct xfs_log_item	*item;
1067		struct xfs_log_vec	*lv;
1068
1069		item = list_first_entry(&ctx->log_items,
1070					struct xfs_log_item, li_cil);
1071
1072		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
1073			list_move(&item->li_cil, whiteouts);
1074			trace_xfs_cil_whiteout_skip(item);
1075			continue;
1076		}
1077
1078		lv = item->li_lv;
1079		lv->lv_order_id = item->li_order_id;
1080
1081		/* we don't write ordered log vectors */
1082		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
1083			*num_bytes += lv->lv_bytes;
1084		*num_iovecs += lv->lv_niovecs;
1085		list_add_tail(&lv->lv_list, &ctx->lv_chain);
1086
1087		list_del_init(&item->li_cil);
1088		item->li_order_id = 0;
1089		item->li_lv = NULL;
1090	}
1091}
1092
1093static void
1094xlog_cil_cleanup_whiteouts(
1095	struct list_head	*whiteouts)
1096{
1097	while (!list_empty(whiteouts)) {
1098		struct xfs_log_item *item = list_first_entry(whiteouts,
1099						struct xfs_log_item, li_cil);
1100		list_del_init(&item->li_cil);
1101		trace_xfs_cil_whiteout_unpin(item);
1102		item->li_ops->iop_unpin(item, 1);
1103	}
1104}
1105
1106/*
1107 * Push the Committed Item List to the log.
1108 *
1109 * If the current sequence is the same as xc_push_seq we need to do a flush. If
1110 * xc_push_seq is less than the current sequence, then it has already been
1111 * flushed and we don't need to do anything - the caller will wait for it to
1112 * complete if necessary.
1113 *
1114 * xc_push_seq is checked unlocked against the sequence number for a match.
1115 * Hence we can allow log forces to run racily and not issue pushes for the
1116 * same sequence twice.  If we get a race between multiple pushes for the same
1117 * sequence they will block on the first one and then abort, hence avoiding
1118 * needless pushes.
1119 *
1120 * This runs from a workqueue so it does not inherit any specific memory
1121 * allocation context. However, we do not want to block on memory reclaim
1122 * recursing back into the filesystem because this push may have been triggered
1123 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
1124 * constraints here.
1125 */
1126static void
1127xlog_cil_push_work(
1128	struct work_struct	*work)
1129{
1130	unsigned int		nofs_flags = memalloc_nofs_save();
1131	struct xfs_cil_ctx	*ctx =
1132		container_of(work, struct xfs_cil_ctx, push_work);
1133	struct xfs_cil		*cil = ctx->cil;
1134	struct xlog		*log = cil->xc_log;
1135	struct xfs_cil_ctx	*new_ctx;
1136	int			num_iovecs = 0;
1137	int			num_bytes = 0;
1138	int			error = 0;
1139	struct xlog_cil_trans_hdr thdr;
1140	struct xfs_log_vec	lvhdr = {};
1141	xfs_csn_t		push_seq;
1142	bool			push_commit_stable;
1143	LIST_HEAD		(whiteouts);
1144	struct xlog_ticket	*ticket;
1145
1146	new_ctx = xlog_cil_ctx_alloc();
1147	new_ctx->ticket = xlog_cil_ticket_alloc(log);
1148
1149	down_write(&cil->xc_ctx_lock);
1150
1151	spin_lock(&cil->xc_push_lock);
1152	push_seq = cil->xc_push_seq;
1153	ASSERT(push_seq <= ctx->sequence);
1154	push_commit_stable = cil->xc_push_commit_stable;
1155	cil->xc_push_commit_stable = false;
1156
1157	/*
1158	 * As we are about to switch to a new, empty CIL context, we no longer
1159	 * need to throttle tasks on CIL space overruns. Wake any waiters that
1160	 * the hard push throttle may have caught so they can start committing
1161	 * to the new context. The cil->xc_push_lock provides the serialisation
1162	 * necessary for safely using the lockless waitqueue_active() check in
1163	 * this context.
1164	 */
1165	if (waitqueue_active(&cil->xc_push_wait))
1166		wake_up_all(&cil->xc_push_wait);
1167
1168	xlog_cil_push_pcp_aggregate(cil, ctx);
1169
1170	/*
1171	 * Check if we've anything to push. If there is nothing, then we don't
1172	 * move on to a new sequence number and so we have to be able to push
1173	 * this sequence again later.
1174	 */
1175	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1176		cil->xc_push_seq = 0;
1177		spin_unlock(&cil->xc_push_lock);
1178		goto out_skip;
1179	}
1180
1181
1182	/* check for a previously pushed sequence */
1183	if (push_seq < ctx->sequence) {
1184		spin_unlock(&cil->xc_push_lock);
1185		goto out_skip;
1186	}
1187
1188	/*
1189	 * We are now going to push this context, so add it to the committing
1190	 * list before we do anything else. This ensures that anyone waiting on
1191	 * this push can easily detect the difference between a "push in
1192	 * progress" and "CIL is empty, nothing to do".
1193	 *
1194	 * IOWs, a wait loop can now check for:
1195	 *	the current sequence not being found on the committing list;
1196	 *	an empty CIL; and
1197	 *	an unchanged sequence number
1198	 * to detect a push that had nothing to do and therefore does not need
1199	 * waiting on. If the CIL is not empty, we get put on the committing
1200	 * list before emptying the CIL and bumping the sequence number. Hence
1201	 * an empty CIL and an unchanged sequence number means we jumped out
1202	 * above after doing nothing.
1203	 *
1204	 * Hence the waiter will either find the commit sequence on the
1205	 * committing list or the sequence number will be unchanged and the CIL
1206	 * still dirty. In that latter case, the push has not yet started, and
1207	 * so the waiter will have to continue trying to check the CIL
1208	 * committing list until it is found. In extreme cases of delay, the
1209	 * sequence may fully commit between the attempts the waiter makes to
1210	 * wait on the commit sequence.
1211	 */
1212	list_add(&ctx->committing, &cil->xc_committing);
1213	spin_unlock(&cil->xc_push_lock);
1214
1215	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
1216
1217	/*
1218	 * Switch the contexts so we can drop the context lock and move out
1219	 * of a shared context. We can't just go straight to the commit record,
1220	 * though - we need to synchronise with previous and future commits so
1221	 * that the commit records are correctly ordered in the log to ensure
1222	 * that we process items during log IO completion in the correct order.
1223	 *
1224	 * For example, if we get an EFI in one checkpoint and the EFD in the
1225	 * next (e.g. due to log forces), we do not want the checkpoint with
1226	 * the EFD to be committed before the checkpoint with the EFI.  Hence
1227	 * we must strictly order the commit records of the checkpoints so
1228	 * that: a) the checkpoint callbacks are attached to the iclogs in the
1229	 * correct order; and b) the checkpoints are replayed in correct order
1230	 * in log recovery.
1231	 *
1232	 * Hence we need to add this context to the committing context list so
1233	 * that higher sequences will wait for us to write out a commit record
1234	 * before they do.
1235	 *
1236	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
1237	 * structure atomically with the addition of this sequence to the
1238	 * committing list. This also ensures that we can do unlocked checks
1239	 * against the current sequence in log forces without risking
1240	 * dereferencing a freed context pointer.
1241	 */
1242	spin_lock(&cil->xc_push_lock);
1243	xlog_cil_ctx_switch(cil, new_ctx);
1244	spin_unlock(&cil->xc_push_lock);
1245	up_write(&cil->xc_ctx_lock);
1246
1247	/*
1248	 * Sort the log vector chain before we add the transaction headers.
1249	 * This ensures we always have the transaction headers at the start
1250	 * of the chain.
1251	 */
1252	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
1253
1254	/*
1255	 * Build a checkpoint transaction header and write it to the log to
1256	 * begin the transaction. We need to account for the space used by the
1257	 * transaction header here as it is not accounted for in xlog_write().
1258	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
1259	 * it gets written into the iclog first.
1260	 */
1261	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
1262	num_bytes += lvhdr.lv_bytes;
1263	list_add(&lvhdr.lv_list, &ctx->lv_chain);
1264
1265	/*
1266	 * Take the lvhdr back off the lv_chain immediately after calling
1267	 * xlog_cil_write_chain() as it should not be passed to log IO
1268	 * completion.
1269	 */
1270	error = xlog_cil_write_chain(ctx, num_bytes);
1271	list_del(&lvhdr.lv_list);
1272	if (error)
1273		goto out_abort_free_ticket;
1274
1275	error = xlog_cil_write_commit_record(ctx);
1276	if (error)
1277		goto out_abort_free_ticket;
1278
1279	/*
1280	 * Grab the ticket from the ctx so we can ungrant it after releasing the
1281	 * commit_iclog. The ctx may be freed by the time we return from
1282	 * releasing the commit_iclog (i.e. checkpoint has been completed and
1283	 * callback run) so we can't reference the ctx after the call to
1284	 * xlog_state_release_iclog().
1285	 */
1286	ticket = ctx->ticket;
1287
1288	/*
1289	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
1290	 * to complete before we submit the commit_iclog. We can't use state
1291	 * checks for this - ACTIVE can be either a past completed iclog or a
1292	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
1293	 * past or future iclog awaiting IO or ordered IO completion to be run.
1294	 * In the latter case, if it's a future iclog and we wait on it, the we
1295	 * In the latter case, if it's a future iclog and we wait on it, then we
1296	 * wakeup until this commit_iclog is written to disk.  Hence we use the
1297	 * iclog header lsn and compare it to the commit lsn to determine if we
1298	 * need to wait on iclogs or not.
1299	 */
1300	spin_lock(&log->l_icloglock);
1301	if (ctx->start_lsn != ctx->commit_lsn) {
1302		xfs_lsn_t	plsn;
1303
1304		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
1305		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
1306			/*
1307			 * Waiting on ic_force_wait orders the completion of
1308			 * iclogs older than ic_prev. Hence we only need to wait
1309			 * on the most recent older iclog here.
1310			 */
1311			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
1312			spin_lock(&log->l_icloglock);
1313		}
1314
1315		/*
1316		 * We need to issue a pre-flush so that the ordering for this
1317		 * checkpoint is correctly preserved down to stable storage.
1318		 */
1319		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
1320	}
1321
1322	/*
1323	 * The commit iclog must be written to stable storage to guarantee
1324	 * journal IO vs metadata writeback IO is correctly ordered on stable
1325	 * storage.
1326	 *
1327	 * If the push caller needs the commit to be immediately stable and the
1328	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
1329	 * will be written when released, switch its state to WANT_SYNC right
1330	 * now.
1331	 */
1332	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
1333	if (push_commit_stable &&
1334	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
1335		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1337	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1338
1339	/* Not safe to reference ctx now! */
1340
1341	spin_unlock(&log->l_icloglock);
1342	xlog_cil_cleanup_whiteouts(&whiteouts);
1343	xfs_log_ticket_ungrant(log, ticket);
1344	memalloc_nofs_restore(nofs_flags);
1345	return;
1346
1347out_skip:
1348	up_write(&cil->xc_ctx_lock);
1349	xfs_log_ticket_put(new_ctx->ticket);
1350	kfree(new_ctx);
1351	memalloc_nofs_restore(nofs_flags);
1352	return;
1353
1354out_abort_free_ticket:
1355	ASSERT(xlog_is_shutdown(log));
1356	xlog_cil_cleanup_whiteouts(&whiteouts);
1357	if (!ctx->commit_iclog) {
1358		xfs_log_ticket_ungrant(log, ctx->ticket);
1359		xlog_cil_committed(ctx);
1360		memalloc_nofs_restore(nofs_flags);
1361		return;
1362	}
1363	spin_lock(&log->l_icloglock);
1364	ticket = ctx->ticket;
1365	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1366	/* Not safe to reference ctx now! */
1367	spin_unlock(&log->l_icloglock);
1368	xfs_log_ticket_ungrant(log, ticket);
1369	memalloc_nofs_restore(nofs_flags);
1370}
1371
1372/*
1373 * We need to push the CIL every so often so we don't cache more than we can fit in
1374 * the log. The limit really is that a checkpoint can't be more than half the
1375 * log (the current checkpoint is not allowed to overwrite the previous
1376 * checkpoint), but commit latency and memory usage limit this to a smaller
1377 * size.
1378 */
1379static void
1380xlog_cil_push_background(
1381	struct xlog	*log) __releases(cil->xc_ctx_lock)
1382{
1383	struct xfs_cil	*cil = log->l_cilp;
1384	int		space_used = atomic_read(&cil->xc_ctx->space_used);
1385
1386	/*
1387	 * The cil won't be empty because we are called while holding the
1388	 * context lock so whatever we added to the CIL will still be there.
1389	 */
1390	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1391
1392	/*
1393	 * We are done if:
1394	 * - we haven't used up all the space available yet; or
1395	 * - we've already queued up a push; and
1396	 * - we're not over the hard limit; and
1397	 * - nothing has been over the hard limit.
1398	 *
1399	 * If so, we don't need to take the push lock as there's nothing to do.
1400	 */
1401	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
1402	    (cil->xc_push_seq == cil->xc_current_sequence &&
1403	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
1404	     !waitqueue_active(&cil->xc_push_wait))) {
1405		up_read(&cil->xc_ctx_lock);
1406		return;
1407	}
1408
1409	spin_lock(&cil->xc_push_lock);
1410	if (cil->xc_push_seq < cil->xc_current_sequence) {
1411		cil->xc_push_seq = cil->xc_current_sequence;
1412		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1413	}
1414
1415	/*
1416	 * Drop the context lock now, we can't hold that if we need to sleep
1417	 * because we are over the blocking threshold. The push_lock is still
1418	 * held, so blocking threshold sleep/wakeup is still correctly
1419	 * serialised here.
1420	 */
1421	up_read(&cil->xc_ctx_lock);
1422
1423	/*
1424	 * If we are well over the space limit, throttle the work that is being
1425	 * done until the push work on this context has begun. Enforce the hard
1426	 * throttle on all transaction commits once it has been activated, even
1427	 * if the committing transactions have resulted in the space usage
1428	 * dipping back down under the hard limit.
1429	 *
1430	 * The cil->xc_push_lock provides the serialisation necessary for safely
1431	 * calling xlog_cil_over_hard_limit() in this context.
1432	 */
1433	if (xlog_cil_over_hard_limit(log, space_used)) {
1434		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1435		ASSERT(space_used < log->l_logsize);
1436		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1437		return;
1438	}
1439
1440	spin_unlock(&cil->xc_push_lock);
1441
1442}
1443
1444/*
1445 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1446 * number that is passed. When it returns, the work will be queued for
1447 * @push_seq, but it won't be completed.
1448 *
1449 * If the caller is performing a synchronous force, we will flush the workqueue
1450 * to get previously queued work moving to minimise the wait time they will
1451 * undergo waiting for all outstanding pushes to complete. The caller is
1452 * expected to do the required waiting for push_seq to complete.
1453 *
1454 * If the caller is performing an async push, we need to ensure that the
1455 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
1456 * don't do this, then the commit record may remain sitting in memory in an
1457 * ACTIVE iclog. This then requires another full log force to push to disk,
1458 * which defeats the purpose of having an async, non-blocking CIL force
1459 * mechanism. Hence in this case we need to pass a flag to the push work to
1460 * indicate it needs to flush the commit record itself.
1461 */
1462static void
1463xlog_cil_push_now(
1464	struct xlog	*log,
1465	xfs_lsn_t	push_seq,
1466	bool		async)
1467{
1468	struct xfs_cil	*cil = log->l_cilp;
1469
1470	if (!cil)
1471		return;
1472
1473	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1474
1475	/* start on any pending background push to minimise wait time on it */
1476	if (!async)
1477		flush_workqueue(cil->xc_push_wq);
1478
1479	spin_lock(&cil->xc_push_lock);
1480
1481	/*
1482	 * If this is an async flush request, we always need to set the
1483	 * xc_push_commit_stable flag even if something else has already queued
1484	 * a push. The flush caller is asking for the CIL to be on stable
1485	 * storage when the next push completes, so regardless of who has queued
1486	 * the push, the flush requires stable semantics from it.
1487	 */
1488	cil->xc_push_commit_stable = async;
1489
1490	/*
1491	 * If the CIL is empty or we've already pushed the sequence then
1492	 * there's no more work that we need to do.
1493	 */
1494	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
1495	    push_seq <= cil->xc_push_seq) {
1496		spin_unlock(&cil->xc_push_lock);
1497		return;
1498	}
1499
1500	cil->xc_push_seq = push_seq;
1501	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1502	spin_unlock(&cil->xc_push_lock);
1503}
1504
1505bool
1506xlog_cil_empty(
1507	struct xlog	*log)
1508{
1509	struct xfs_cil	*cil = log->l_cilp;
1510	bool		empty = false;
1511
1512	spin_lock(&cil->xc_push_lock);
1513	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1514		empty = true;
1515	spin_unlock(&cil->xc_push_lock);
1516	return empty;
1517}
1518
1519/*
1520 * If there are intent done items in this transaction and the related intent was
1521 * committed in the current (same) CIL checkpoint, we don't need to write either
1522 * the intent or intent done item to the journal as the change will be
1523 * journalled atomically within this checkpoint. As we cannot remove items from
1524 * the CIL here, mark the related intent with a whiteout so that the CIL push
1525 * can remove it rather than writing it to the journal. Then remove the intent
1526 * done item from the current transaction and release it so it doesn't get put
1527 * into the CIL at all.
1528 */
1529static uint32_t
1530xlog_cil_process_intents(
1531	struct xfs_cil		*cil,
1532	struct xfs_trans	*tp)
1533{
1534	struct xfs_log_item	*lip, *ilip, *next;
1535	uint32_t		len = 0;
1536
1537	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1538		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1539			continue;
1540
1541		ilip = lip->li_ops->iop_intent(lip);
1542		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1543			continue;
1544		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1545		trace_xfs_cil_whiteout_mark(ilip);
1546		len += ilip->li_lv->lv_bytes;
1547		kvfree(ilip->li_lv);
1548		ilip->li_lv = NULL;
1549
1550		xfs_trans_del_item(lip);
1551		lip->li_ops->iop_release(lip);
1552	}
1553	return len;
1554}
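
/*
 * A user-space sketch (not kernel code) of the whiteout rule above: an
 * intent/intent-done pair can only be cancelled in memory when the intent
 * was logged in the same, still-open checkpoint sequence; otherwise the
 * intent may already be on its way to disk and both records must be
 * journalled. All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_can_whiteout(unsigned long intent_seq,
			      unsigned long current_seq, bool cil_empty)
{
	if (cil_empty)
		return false;	/* nothing is pinned in this checkpoint */
	return intent_seq == current_seq;
}

int main(void)
{
	/* intent committed in this checkpoint: cancel both records */
	printf("same seq:  %d\n", demo_can_whiteout(42, 42, false));
	/* intent from an earlier checkpoint: must log the done item */
	printf("older seq: %d\n", demo_can_whiteout(41, 42, false));
	return 0;
}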
1555
1556/*
1557 * Commit a transaction with the given vector to the Committed Item List.
1558 *
1559 * To do this, we need to format the item, pin it in memory if required and
1560 * account for the space used by the transaction. Once we have done that we
1561 * need to release the unused reservation for the transaction, attach the
1562 * transaction to the checkpoint context so we carry the busy extents through
1563 * to checkpoint completion, and then unlock all the items in the transaction.
1564 *
1565 * Called with the context lock already held in read mode to lock out
1566 * background commit, returns without it held once background commits are
1567 * allowed again.
1568 */
1569void
1570xlog_cil_commit(
1571	struct xlog		*log,
1572	struct xfs_trans	*tp,
1573	xfs_csn_t		*commit_seq,
1574	bool			regrant)
1575{
1576	struct xfs_cil		*cil = log->l_cilp;
1577	struct xfs_log_item	*lip, *next;
1578	uint32_t		released_space = 0;
1579
1580	/*
1581	 * Do all necessary memory allocation before we lock the CIL.
1582	 * This ensures the allocation does not deadlock with a CIL
1583	 * push in memory reclaim (e.g. from kswapd).
1584	 */
1585	xlog_cil_alloc_shadow_bufs(log, tp);
1586
1587	/* lock out background commit */
1588	down_read(&cil->xc_ctx_lock);
1589
1590	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1591		released_space = xlog_cil_process_intents(cil, tp);
1592
1593	xlog_cil_insert_items(log, tp, released_space);
1594
1595	if (regrant && !xlog_is_shutdown(log))
1596		xfs_log_ticket_regrant(log, tp->t_ticket);
1597	else
1598		xfs_log_ticket_ungrant(log, tp->t_ticket);
1599	tp->t_ticket = NULL;
1600	xfs_trans_unreserve_and_mod_sb(tp);
1601
1602	/*
1603	 * Once all the items of the transaction have been copied to the CIL,
1604	 * the items can be unlocked and possibly freed.
1605	 *
1606	 * This needs to be done before we drop the CIL context lock because we
1607	 * have to update state in the log items and unlock them before they go
1608	 * to disk. If we don't, then the CIL checkpoint can race with us and
1609	 * we can run checkpoint completion before we've updated and unlocked
1610	 * the log items. This affects (at least) processing of stale buffers,
1611	 * inodes and EFIs.
1612	 */
1613	trace_xfs_trans_commit_items(tp, _RET_IP_);
1614	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1615		xfs_trans_del_item(lip);
1616		if (lip->li_ops->iop_committing)
1617			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1618	}
1619	if (commit_seq)
1620		*commit_seq = cil->xc_ctx->sequence;
1621
1622	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
1623	xlog_cil_push_background(log);
1624}
1625
1626/*
1627 * Flush the CIL to stable storage but don't wait for it to complete. This
1628 * requires the CIL push to ensure the commit record for the push hits the disk,
1629 * but otherwise is no different to a push done from a log force.
1630 */
1631void
1632xlog_cil_flush(
1633	struct xlog	*log)
1634{
1635	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;
1636
1637	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1638	xlog_cil_push_now(log, seq, true);
1639
1640	/*
1641	 * If the CIL is empty, make sure that any previous checkpoint that may
1642	 * still be in an active iclog is pushed to stable storage.
1643	 */
1644	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1645		xfs_log_force(log->l_mp, 0);
1646}
1647
1648/*
1649 * Conditionally push the CIL based on the sequence passed in.
1650 *
1651 * We only need to push if we haven't already pushed the sequence number given.
1652 * Hence the only time we will trigger a push here is if the push sequence is
1653 * the same as the current context.
1654 *
1655 * We return the current commit lsn to allow the callers to determine if an
1656 * iclog flush is necessary following this call.
1657 */
1658xfs_lsn_t
1659xlog_cil_force_seq(
1660	struct xlog	*log,
1661	xfs_csn_t	sequence)
1662{
1663	struct xfs_cil		*cil = log->l_cilp;
1664	struct xfs_cil_ctx	*ctx;
1665	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
1666
1667	ASSERT(sequence <= cil->xc_current_sequence);
1668
1669	if (!sequence)
1670		sequence = cil->xc_current_sequence;
1671	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1672
1673	/*
1674	 * check to see if we need to force out the current context.
1675	 * xlog_cil_push() handles racing pushes for the same sequence,
1676	 * so no need to deal with it here.
1677	 */
1678restart:
1679	xlog_cil_push_now(log, sequence, false);
1680
1681	/*
1682	 * See if we can find a previous sequence still committing.
1683	 * We need to wait for all previous sequence commits to complete
1684	 * before allowing the force of push_seq to go ahead. Hence block
1685	 * on commits for those as well.
1686	 */
1687	spin_lock(&cil->xc_push_lock);
1688	list_for_each_entry(ctx, &cil->xc_committing, committing) {
1689		/*
1690		 * Avoid getting stuck in this loop because we were woken by the
1691		 * shutdown, but then went back to sleep once already in the
1692		 * shutdown state.
1693		 */
1694		if (xlog_is_shutdown(log))
1695			goto out_shutdown;
1696		if (ctx->sequence > sequence)
1697			continue;
1698		if (!ctx->commit_lsn) {
1699			/*
1700			 * It is still being pushed! Wait for the push to
1701			 * complete, then start again from the beginning.
1702			 */
1703			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1704			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1705			goto restart;
1706		}
1707		if (ctx->sequence != sequence)
1708			continue;
1709		/* found it! */
1710		commit_lsn = ctx->commit_lsn;
1711	}
1712
1713	/*
1714	 * The call to xlog_cil_push_now() executes the push in the background.
1715 * Hence by the time we have got here our sequence may not have been
1716	 * pushed yet. This is true if the current sequence still matches the
1717	 * push sequence after the above wait loop and the CIL still contains
1718	 * dirty objects. This is guaranteed by the push code first adding the
1719	 * context to the committing list before emptying the CIL.
1720	 *
1721	 * Hence if we don't find the context in the committing list and the
1722	 * current sequence number is unchanged then the CIL contents are
1723 * significant.  If the CIL is empty, it means there was nothing to push
1724	 * and that means there is nothing to wait for. If the CIL is not empty,
1725	 * it means we haven't yet started the push, because if it had started
1726	 * we would have found the context on the committing list.
1727	 */
1728	if (sequence == cil->xc_current_sequence &&
1729	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1730		spin_unlock(&cil->xc_push_lock);
1731		goto restart;
1732	}
1733
1734	spin_unlock(&cil->xc_push_lock);
1735	return commit_lsn;
1736
1737	/*
1738	 * We detected a shutdown in progress. We need to trigger the log force
1739 * to pass through its iclog state machine error handling, even though
1740	 * we are already in a shutdown state. Hence we can't return
1741	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1742	 * LSN is already stable), so we return a zero LSN instead.
1743	 */
1744out_shutdown:
1745	spin_unlock(&cil->xc_push_lock);
1746	return 0;
1747}
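
/*
 * A hypothetical caller-side sketch (user-space C, not taken from the
 * kernel) of how the three return classes above would be interpreted.
 * DEMO_NULLCOMMITLSN stands in for NULLCOMMITLSN, and the handler name is
 * made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t demo_lsn_t;
#define DEMO_NULLCOMMITLSN	((demo_lsn_t)-1)

static void demo_handle_force_result(demo_lsn_t commit_lsn)
{
	if (commit_lsn == 0)
		printf("shutdown: run the iclog error state machine anyway\n");
	else if (commit_lsn == DEMO_NULLCOMMITLSN)
		printf("sequence already stable: no iclog flush needed\n");
	else
		printf("flush iclogs up to commit LSN %lld\n",
		       (long long)commit_lsn);
}

int main(void)
{
	demo_handle_force_result(0);			/* shutdown path */
	demo_handle_force_result(DEMO_NULLCOMMITLSN);	/* nothing to do */
	demo_handle_force_result(1234);			/* normal force */
	return 0;
}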
1748
1749/*
1750 * Perform initial CIL structure initialisation.
1751 */
1752int
1753xlog_cil_init(
1754	struct xlog		*log)
1755{
1756	struct xfs_cil		*cil;
1757	struct xfs_cil_ctx	*ctx;
1758	struct xlog_cil_pcp	*cilpcp;
1759	int			cpu;
1760
1761	cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1762	if (!cil)
1763		return -ENOMEM;
1764	/*
1765	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
1766	 * concurrency the log spinlocks will be exposed to.
1767	 */
1768	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1769			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1770			4, log->l_mp->m_super->s_id);
1771	if (!cil->xc_push_wq)
1772		goto out_destroy_cil;
1773
1774	cil->xc_log = log;
1775	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1776	if (!cil->xc_pcp)
1777		goto out_destroy_wq;
1778
1779	for_each_possible_cpu(cpu) {
1780		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1781		INIT_LIST_HEAD(&cilpcp->busy_extents);
1782		INIT_LIST_HEAD(&cilpcp->log_items);
1783	}
1784
1785	INIT_LIST_HEAD(&cil->xc_committing);
1786	spin_lock_init(&cil->xc_push_lock);
1787	init_waitqueue_head(&cil->xc_push_wait);
1788	init_rwsem(&cil->xc_ctx_lock);
1789	init_waitqueue_head(&cil->xc_start_wait);
1790	init_waitqueue_head(&cil->xc_commit_wait);
1791	log->l_cilp = cil;
1792
1793	ctx = xlog_cil_ctx_alloc();
1794	xlog_cil_ctx_switch(cil, ctx);
1795	return 0;
1796
1797out_destroy_wq:
1798	destroy_workqueue(cil->xc_push_wq);
1799out_destroy_cil:
1800	kfree(cil);
1801	return -ENOMEM;
1802}
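
/*
 * A conceptual user-space model (arrays stand in for per-CPU data and no
 * kernel API is used) of why this version keeps per-CPU CIL structures
 * (xc_pcp): committers append to a CPU-local list without taking a shared
 * lock, and the single push worker aggregates every CPU's list afterwards.
 */
#include <stdio.h>

#define DEMO_NR_CPUS	4

int main(void)
{
	/* items committed on each CPU since the last push (example data) */
	int percpu_items[DEMO_NR_CPUS] = { 3, 0, 5, 2 };
	int total = 0;

	/* the push worker is the only place all CPUs are walked */
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		total += percpu_items[cpu];
	printf("CIL push aggregates %d items\n", total);
	return 0;
}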
1803
1804void
1805xlog_cil_destroy(
1806	struct xlog	*log)
1807{
1808	struct xfs_cil	*cil = log->l_cilp;
1809
1810	if (cil->xc_ctx) {
1811		if (cil->xc_ctx->ticket)
1812			xfs_log_ticket_put(cil->xc_ctx->ticket);
1813		kfree(cil->xc_ctx);
1814	}
1815
1816	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1817	free_percpu(cil->xc_pcp);
1818	destroy_workqueue(cil->xc_push_wq);
1819	kfree(cil);
1820}
1821
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
   4 */
   5
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_format.h"
   9#include "xfs_log_format.h"
  10#include "xfs_shared.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_extent_busy.h"
  14#include "xfs_trans.h"
  15#include "xfs_trans_priv.h"
  16#include "xfs_log.h"
  17#include "xfs_log_priv.h"
  18#include "xfs_trace.h"
  19
  20struct workqueue_struct *xfs_discard_wq;
  21
  22/*
  23 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
  24 * recover, so we don't allow failure here. Also, we allocate in a context that
  25 * we don't want to be issuing transactions from, so we need to tell the
  26 * allocation code this as well.
  27 *
  28 * We don't reserve any space for the ticket - we are going to steal whatever
  29 * space we require from transactions as they commit. To ensure we reserve all
  30 * the space required, we need to set the current reservation of the ticket to
  31 * zero so that we know to steal the initial transaction overhead from the
  32 * first transaction commit.
  33 */
  34static struct xlog_ticket *
  35xlog_cil_ticket_alloc(
  36	struct xlog	*log)
  37{
  38	struct xlog_ticket *tic;
  39
  40	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);
  41
  42	/*
  43	 * set the current reservation to zero so we know to steal the basic
  44	 * transaction overhead reservation from the first transaction commit.
  45	 */
  46	tic->t_curr_res = 0;
  47	return tic;
  48}
  49
  50/*
  51 * After the first stage of log recovery is done, we know where the head and
  52 * tail of the log are. We need this log initialisation done before we can
  53 * initialise the first CIL checkpoint context.
  54 *
  55 * Here we allocate a log ticket to track space usage during a CIL push.  This
  56 * ticket is passed to xlog_write() directly so that we don't slowly leak log
  57 * space by failing to account for space used by log headers and additional
  58 * region headers for split regions.
  59 */
  60void
  61xlog_cil_init_post_recovery(
  62	struct xlog	*log)
  63{
  64	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
  65	log->l_cilp->xc_ctx->sequence = 1;
  66}
  67
  68static inline int
  69xlog_cil_iovec_space(
  70	uint	niovecs)
  71{
  72	return round_up((sizeof(struct xfs_log_vec) +
  73					niovecs * sizeof(struct xfs_log_iovec)),
  74			sizeof(uint64_t));
  75}
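
/*
 * A runnable user-space sketch of the sizing rule above: the vector header
 * plus iovec table is rounded up to a uint64_t boundary so that the data
 * region that follows starts 64-bit aligned. The structs are illustrative
 * stand-ins, not the real xfs_log_vec/xfs_log_iovec layouts.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

struct demo_log_vec   { void *next; void *item; int niovecs; };
struct demo_log_iovec { void *addr; int len; };

static int demo_iovec_space(unsigned int niovecs)
{
	return DEMO_ROUND_UP(sizeof(struct demo_log_vec) +
			     niovecs * sizeof(struct demo_log_iovec),
			     sizeof(uint64_t));
}

int main(void)
{
	for (unsigned int n = 0; n <= 4; n++)
		printf("niovecs=%u -> header + iovec space = %d bytes\n",
		       n, demo_iovec_space(n));
	return 0;
}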
  76
  77/*
  78 * Allocate or pin log vector buffers for CIL insertion.
  79 *
  80 * The CIL currently uses disposable buffers for copying a snapshot of the
  81 * modified items into the log during a push. The biggest problem with this is
  82 * the requirement to allocate the disposable buffer during the commit if:
  83 *	a) it does not exist; or
  84 *	b) it is too small
  85 *
  86 * If we do this allocation within xlog_cil_insert_format_items(), it is done
  87 * under the xc_ctx_lock, which means that a CIL push cannot occur during
  88 * the memory allocation. This means that we have a potential deadlock situation
  89 * under low memory conditions when we have lots of dirty metadata pinned in
  90 * the CIL and we need a CIL commit to occur to free memory.
  91 *
  92 * To avoid this, we need to move the memory allocation outside the
  93 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
  94 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
  95 * vector buffers between the check and the formatting of the item into the
  96 * log vector buffer within the xc_ctx_lock.
  97 *
  98 * Because the log vector buffer needs to be unchanged during the CIL push
  99 * process, we cannot share the buffer between the transaction commit (which
 100 * modifies the buffer) and the CIL push context that is writing the changes
 101 * into the log. This means skipping preallocation of buffer space is
 102 * unreliable, but we most definitely do not want to be allocating and freeing
 103 * buffers unnecessarily during commits when overwrites can be done safely.
 104 *
 105 * The simplest solution to this problem is to allocate a shadow buffer when a
 106 * log item is committed for the second time, and then to only use this buffer
 107 * if necessary. The buffer can remain attached to the log item until such time
 108 * it is needed, and this is the buffer that is reallocated to match the size of
 109 * the incoming modification. Then during the formatting of the item we can swap
 110 * the active buffer with the new one if we can't reuse the existing buffer. We
 111 * don't free the old buffer as it may be reused on the next modification if
 112 * its size is right, otherwise we'll free and reallocate it at that point.
 113 *
 114 * This function builds a vector for the changes in each log item in the
 115 * transaction. It then works out the length of the buffer needed for each log
 116 * item, allocates them and attaches the vector to the log item in preparation
 117 * for the formatting step which occurs under the xc_ctx_lock.
 118 *
 119 * While this means the memory footprint goes up, it avoids the repeated
 120 * alloc/free pattern that repeated modifications of an item would otherwise
 121 * cause, and hence minimises the CPU overhead of such behaviour.
 122 */
 123static void
 124xlog_cil_alloc_shadow_bufs(
 125	struct xlog		*log,
 126	struct xfs_trans	*tp)
 127{
 128	struct xfs_log_item	*lip;
 129
 130	list_for_each_entry(lip, &tp->t_items, li_trans) {
 131		struct xfs_log_vec *lv;
 132		int	niovecs = 0;
 133		int	nbytes = 0;
 134		int	buf_size;
 135		bool	ordered = false;
 136
 137		/* Skip items which aren't dirty in this transaction. */
 138		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 139			continue;
 140
 141		/* get number of vecs and size of data to be stored */
 142		lip->li_ops->iop_size(lip, &niovecs, &nbytes);
 143
 144		/*
 145		 * Ordered items need to be tracked but we do not wish to write
 146		 * them. We need a logvec to track the object, but we do not
 147		 * need an iovec or buffer to be allocated for copying data.
 148		 */
 149		if (niovecs == XFS_LOG_VEC_ORDERED) {
 150			ordered = true;
 151			niovecs = 0;
 152			nbytes = 0;
 153		}
 154
 155		/*
 156		 * We 64-bit align the length of each iovec so that the start
 157		 * of the next one is naturally aligned.  We'll need to
 158		 * account for that slack space here. Then round nbytes up
 159		 * to 64-bit alignment so that the initial buffer alignment is
 160		 * easy to calculate and verify.
 161		 */
 162		nbytes += niovecs * sizeof(uint64_t);
 163		nbytes = round_up(nbytes, sizeof(uint64_t));
 164
 165		/*
 166		 * The data buffer needs to start 64-bit aligned, so round up
 167		 * that space to ensure we can align it appropriately and not
 168		 * overrun the buffer.
 169		 */
 170		buf_size = nbytes + xlog_cil_iovec_space(niovecs);
 171
 172		/*
 173		 * if we have no shadow buffer, or it is too small, we need to
 174		 * reallocate it.
 175		 */
 176		if (!lip->li_lv_shadow ||
 177		    buf_size > lip->li_lv_shadow->lv_size) {
 178
 179			/*
 180			 * We free and allocate here as a realloc would copy
 181			 * unnecessary data. We don't use kmem_zalloc() for the
 182			 * same reason - we don't need to zero the data area in
 183			 * the buffer, only the log vector header and the iovec
 184			 * storage.
 185			 */
 186			kmem_free(lip->li_lv_shadow);
 187
 188			lv = kmem_alloc_large(buf_size, KM_NOFS);
 189			memset(lv, 0, xlog_cil_iovec_space(niovecs));
 190
 191			lv->lv_item = lip;
 192			lv->lv_size = buf_size;
 193			if (ordered)
 194				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 195			else
 196				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
 197			lip->li_lv_shadow = lv;
 198		} else {
 199			/* same or smaller, optimise common overwrite case */
 200			lv = lip->li_lv_shadow;
 201			if (ordered)
 202				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 203			else
 204				lv->lv_buf_len = 0;
 205			lv->lv_bytes = 0;
 206			lv->lv_next = NULL;
 207		}
 208
 209		/* Ensure the lv is set up according to ->iop_size */
 210		lv->lv_niovecs = niovecs;
 211
 212		/* The allocated data region lies beyond the iovec region */
 213		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
 214	}
 215
 216}
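
/*
 * A minimal user-space model of the reallocation policy above: reuse the
 * shadow buffer when it is already large enough, otherwise free and
 * allocate fresh (a realloc would pointlessly copy stale data). The
 * structure is an illustrative stand-in for the shadow log vector.
 */
#include <stdlib.h>

struct demo_shadow {
	size_t	size;		/* allocated data bytes that follow */
};

static struct demo_shadow *
demo_resize_shadow(struct demo_shadow *old, size_t need)
{
	if (old && old->size >= need)
		return old;		/* common overwrite case: reuse */
	free(old);			/* don't copy; contents are stale */
	old = malloc(sizeof(*old) + need);
	if (old)
		old->size = need;
	return old;
}

int main(void)
{
	struct demo_shadow *s = demo_resize_shadow(NULL, 128);

	s = demo_resize_shadow(s, 64);	/* fits: same buffer comes back */
	s = demo_resize_shadow(s, 256);	/* too small: freed + reallocated */
	free(s);
	return 0;
}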
 217
 218/*
 219 * Prepare the log item for insertion into the CIL. Calculate the difference in
 220 * log space and vectors it will consume, and if it is a new item pin it as
 221 * well.
 222 */
 223STATIC void
 224xfs_cil_prepare_item(
 225	struct xlog		*log,
 226	struct xfs_log_vec	*lv,
 227	struct xfs_log_vec	*old_lv,
 228	int			*diff_len,
 229	int			*diff_iovecs)
 230{
 231	/* Account for the new LV being passed in */
 232	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
 233		*diff_len += lv->lv_bytes;
 234		*diff_iovecs += lv->lv_niovecs;
 235	}
 236
 237	/*
 238	 * If there is no old LV, this is the first time we've seen the item in
 239	 * this CIL context and so we need to pin it. If we are replacing the
 240	 * old_lv, then remove the space it accounts for and make it the shadow
 241	 * buffer for later freeing. In both cases we are now switching to the
 242	 * shadow buffer, so update the pointer to it appropriately.
 243	 */
 244	if (!old_lv) {
 245		if (lv->lv_item->li_ops->iop_pin)
 246			lv->lv_item->li_ops->iop_pin(lv->lv_item);
 247		lv->lv_item->li_lv_shadow = NULL;
 248	} else if (old_lv != lv) {
 249		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
 250
 251		*diff_len -= old_lv->lv_bytes;
 252		*diff_iovecs -= old_lv->lv_niovecs;
 253		lv->lv_item->li_lv_shadow = old_lv;
 254	}
 255
 256	/* attach new log vector to log item */
 257	lv->lv_item->li_lv = lv;
 258
 259	/*
 260	 * If this is the first time the item is being committed to the
 261	 * CIL, store the sequence number on the log item so we can
 262	 * tell in future commits whether this is the first checkpoint
 263	 * the item is being committed into.
 264	 */
 265	if (!lv->lv_item->li_seq)
 266		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
 267}
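
/*
 * A small user-space illustration of the delta bookkeeping above: the new
 * log vector always adds its size, and replacing an old vector subtracts
 * whatever the old one had already accounted to the CIL. Plain integers
 * stand in for the lv_bytes/lv_niovecs fields.
 */
#include <stdio.h>

int main(void)
{
	int diff_len = 0, diff_iovecs = 0;
	int new_bytes = 256, new_iovecs = 3;	/* incoming formatted lv */
	int old_bytes = 128, old_iovecs = 2;	/* lv already in the CIL */

	diff_len += new_bytes;		/* account for the new lv */
	diff_iovecs += new_iovecs;
	diff_len -= old_bytes;		/* un-account the replaced lv */
	diff_iovecs -= old_iovecs;

	printf("CIL grows by %d bytes and %d iovecs\n",
	       diff_len, diff_iovecs);
	return 0;
}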
 268
 269/*
 270 * Format log items into flat buffers
 271 *
 272 * For delayed logging, we need to hold a formatted buffer containing all the
 273 * changes on the log item. This enables us to relog the item in memory and
 274 * write it out asynchronously without needing to relock the object that was
 275 * modified at the time it gets written into the iclog.
 276 *
 277 * This function takes the prepared log vectors attached to each log item, and
 278 * formats the changes into the log vector buffer. The buffer it uses is
 279 * dependent on the current state of the vector in the CIL - the shadow lv is
 280 * guaranteed to be large enough for the current modification, but we will only
 281 * use that if we can't reuse the existing lv. If we can't reuse the existing
 282 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 283 * done lazily either by the next modification or the freeing of the log item.
 284 *
 285 * We don't set up region headers during this process; we simply copy the
 286 * regions into the flat buffer. We can do this because we still have to do a
 287 * formatting step to write the regions into the iclog buffer.  Writing the
 288 * ophdrs during the iclog write means that we can support splitting large
 289 * regions across iclog boundares without needing a change in the format of the
 290 * item/region encapsulation.
 291 *
 292 * Hence what we need to do now is rewrite the vector array to point
 293 * to the copied region inside the buffer we just allocated. This allows us to
 294 * format the regions into the iclog as though they are being formatted
 295 * directly out of the objects themselves.
 296 */
 297static void
 298xlog_cil_insert_format_items(
 299	struct xlog		*log,
 300	struct xfs_trans	*tp,
 301	int			*diff_len,
 302	int			*diff_iovecs)
 303{
 304	struct xfs_log_item	*lip;
 305
 306
 307	/* Bail out if we didn't find a log item.  */
 308	if (list_empty(&tp->t_items)) {
 309		ASSERT(0);
 310		return;
 311	}
 312
 313	list_for_each_entry(lip, &tp->t_items, li_trans) {
 314		struct xfs_log_vec *lv;
 315		struct xfs_log_vec *old_lv = NULL;
 316		struct xfs_log_vec *shadow;
 317		bool	ordered = false;
 318
 319		/* Skip items which aren't dirty in this transaction. */
 320		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 321			continue;
 322
 323		/*
 324		 * The formatting size information is already attached to
 325		 * the shadow lv on the log item.
 326		 */
 327		shadow = lip->li_lv_shadow;
 328		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
 329			ordered = true;
 330
 331		/* Skip items that do not have any vectors for writing */
 332		if (!shadow->lv_niovecs && !ordered)
 333			continue;
 334
 335		/* compare to existing item size */
 336		old_lv = lip->li_lv;
 337		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
 338			/* same or smaller, optimise common overwrite case */
 339			lv = lip->li_lv;
 340			lv->lv_next = NULL;
 341
 342			if (ordered)
 343				goto insert;
 344
 345			/*
 346			 * set the item up as though it is a new insertion so
 347			 * that the space reservation accounting is correct.
 348			 */
 349			*diff_iovecs -= lv->lv_niovecs;
 350			*diff_len -= lv->lv_bytes;
 351
 352			/* Ensure the lv is set up according to ->iop_size */
 353			lv->lv_niovecs = shadow->lv_niovecs;
 354
 355			/* reset the lv buffer information for new formatting */
 356			lv->lv_buf_len = 0;
 357			lv->lv_bytes = 0;
 358			lv->lv_buf = (char *)lv +
 359					xlog_cil_iovec_space(lv->lv_niovecs);
 360		} else {
 361			/* switch to shadow buffer! */
 362			lv = shadow;
 363			lv->lv_item = lip;
 364			if (ordered) {
 365				/* track as an ordered logvec */
 366				ASSERT(lip->li_lv == NULL);
 367				goto insert;
 368			}
 369		}
 370
 371		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
 372		lip->li_ops->iop_format(lip, lv);
 373insert:
 374		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
 375	}
 376}
 377
 378/*
 379 * Insert the log items into the CIL and calculate the difference in space
 380 * consumed by the item. Add the space to the checkpoint ticket and calculate
 381 * if the change requires additional log metadata. If it does, take that space
 382 * as well. Remove the amount of space we added to the checkpoint ticket from
 383 * the current transaction ticket so that the accounting works out correctly.
 384 */
 385static void
 386xlog_cil_insert_items(
 387	struct xlog		*log,
 388	struct xfs_trans	*tp)
 389{
 390	struct xfs_cil		*cil = log->l_cilp;
 391	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
 392	struct xfs_log_item	*lip;
 393	int			len = 0;
 394	int			diff_iovecs = 0;
 395	int			iclog_space;
 396	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
 397
 398	ASSERT(tp);
 399
 400	/*
 401	 * We can do this safely because the context can't checkpoint until we
 402	 * are done so it doesn't matter exactly how we update the CIL.
 403	 */
 404	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);
 405
 406	spin_lock(&cil->xc_cil_lock);
 407
 408	/* account for space used by new iovec headers  */
 409	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
 410	len += iovhdr_res;
 411	ctx->nvecs += diff_iovecs;
 412
 413	/* attach the transaction to the CIL if it has any busy extents */
 414	if (!list_empty(&tp->t_busy))
 415		list_splice_init(&tp->t_busy, &ctx->busy_extents);
 416
 417	/*
 418	 * Now transfer enough transaction reservation to the context ticket
 419	 * for the checkpoint. The context ticket is special - the unit
 420	 * reservation has to grow as well as the current reservation as we
 421	 * steal from tickets so we can correctly determine the space used
 422	 * during the transaction commit.
 423	 */
 424	if (ctx->ticket->t_curr_res == 0) {
 425		ctx_res = ctx->ticket->t_unit_res;
 426		ctx->ticket->t_curr_res = ctx_res;
 427		tp->t_ticket->t_curr_res -= ctx_res;
 428	}
 429
 430	/* do we need space for more log record headers? */
 431	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
 432	if (len > 0 && (ctx->space_used / iclog_space !=
 433				(ctx->space_used + len) / iclog_space)) {
 434		split_res = (len + iclog_space - 1) / iclog_space;
 435		/* need to take into account split region headers, too */
 436		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
 437		ctx->ticket->t_unit_res += split_res;
 438		ctx->ticket->t_curr_res += split_res;
 439		tp->t_ticket->t_curr_res -= split_res;
 440		ASSERT(tp->t_ticket->t_curr_res >= len);
 441	}
 442	tp->t_ticket->t_curr_res -= len;
 443	ctx->space_used += len;
 444
 445	/*
 446	 * If we've overrun the reservation, dump the tx details before we move
 447	 * the log items. Shutdown is imminent...
 448	 */
 449	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
 450		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
 451		xfs_warn(log->l_mp,
 452			 "  log items: %d bytes (iov hdrs: %d bytes)",
 453			 len, iovhdr_res);
 454		xfs_warn(log->l_mp, "  split region headers: %d bytes",
 455			 split_res);
 456		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
 457		xlog_print_trans(tp);
 458	}
 459
 460	/*
 461	 * Now (re-)position everything modified at the tail of the CIL.
 462	 * We do this here so we only need to take the CIL lock once during
 463	 * the transaction commit.
 464	 */
 465	list_for_each_entry(lip, &tp->t_items, li_trans) {
 466
 467		/* Skip items which aren't dirty in this transaction. */
 468		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 469			continue;
 470
 471		/*
 472		 * Only move the item if it isn't already at the tail. This is
 473		 * to prevent a transient list_empty() state when reinserting
 474		 * an item that is already the only item in the CIL.
 475		 */
 476		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
 477			list_move_tail(&lip->li_cil, &cil->xc_cil);
 478	}
 479
 480	spin_unlock(&cil->xc_cil_lock);
 481
 482	if (tp->t_ticket->t_curr_res < 0)
 483		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
 484}
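
/*
 * A runnable user-space sketch of the split-region reservation above, with
 * made-up sizes: only when adding len bytes pushes the checkpoint across an
 * iclog boundary do we steal extra reservation, one record header plus op
 * header per boundary that can be crossed.
 */
#include <stdio.h>

int main(void)
{
	int iclog_space = 32256;	/* usable bytes per iclog (example) */
	int hdr_size = 512 + 16;	/* iclog header + op header (example) */
	int space_used = 30000;		/* checkpoint bytes so far */
	int len = 8000;			/* bytes this commit adds */
	int split_res = 0;

	if (len > 0 && (space_used / iclog_space !=
			(space_used + len) / iclog_space)) {
		/* boundaries crossed == extra headers we may need */
		split_res = (len + iclog_space - 1) / iclog_space;
		split_res *= hdr_size;
	}
	printf("extra reservation for split regions: %d bytes\n", split_res);
	return 0;
}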
 485
 486static void
 487xlog_cil_free_logvec(
 488	struct xfs_log_vec	*log_vector)
 489{
 490	struct xfs_log_vec	*lv;
 491
 492	for (lv = log_vector; lv; ) {
 493		struct xfs_log_vec *next = lv->lv_next;
 494		kmem_free(lv);
 495		lv = next;
 496	}
 497}
 498
 499static void
 500xlog_discard_endio_work(
 501	struct work_struct	*work)
 502{
 503	struct xfs_cil_ctx	*ctx =
 504		container_of(work, struct xfs_cil_ctx, discard_endio_work);
 505	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
 506
 507	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
 508	kmem_free(ctx);
 509}
 510
 511/*
 512 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 513 * pagb_lock.  Note that we need an unbounded workqueue, otherwise we might
 514 * get the execution delayed up to 30 seconds for weird reasons.
 515 */
 516static void
 517xlog_discard_endio(
 518	struct bio		*bio)
 519{
 520	struct xfs_cil_ctx	*ctx = bio->bi_private;
 521
 522	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
 523	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
 524	bio_put(bio);
 525}
 526
 527static void
 528xlog_discard_busy_extents(
 529	struct xfs_mount	*mp,
 530	struct xfs_cil_ctx	*ctx)
 531{
 532	struct list_head	*list = &ctx->busy_extents;
 533	struct xfs_extent_busy	*busyp;
 534	struct bio		*bio = NULL;
 535	struct blk_plug		plug;
 536	int			error = 0;
 537
 538	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);
 539
 540	blk_start_plug(&plug);
 541	list_for_each_entry(busyp, list, list) {
 542		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
 543					 busyp->length);
 544
 545		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
 546				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
 547				XFS_FSB_TO_BB(mp, busyp->length),
 548				GFP_NOFS, 0, &bio);
 549		if (error && error != -EOPNOTSUPP) {
 550			xfs_info(mp,
 551	 "discard failed for extent [0x%llx,%u], error %d",
 552				 (unsigned long long)busyp->bno,
 553				 busyp->length,
 554				 error);
 555			break;
 556		}
 557	}
 558
 559	if (bio) {
 560		bio->bi_private = ctx;
 561		bio->bi_end_io = xlog_discard_endio;
 562		submit_bio(bio);
 563	} else {
 564		xlog_discard_endio_work(&ctx->discard_endio_work);
 565	}
 566	blk_finish_plug(&plug);
 567}
 568
 569/*
 570 * Mark all items committed and clear busy extents. We free the log vector
 571 * chains in a separate pass so that we unpin the log items as quickly as
 572 * possible.
 573 */
 574static void
 575xlog_cil_committed(
 576	struct xfs_cil_ctx	*ctx)
 577{
 578	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
 579	bool			abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);
 580
 581	/*
 582	 * If the I/O failed, we're aborting the commit and already shutdown.
 583	 * Wake any commit waiters before aborting the log items so we don't
 584	 * block async log pushers on callbacks. Async log pushers explicitly do
 585	 * not wait on log force completion because they may be holding locks
 586	 * required to unpin items.
 587	 */
 588	if (abort) {
 589		spin_lock(&ctx->cil->xc_push_lock);
 590		wake_up_all(&ctx->cil->xc_commit_wait);
 591		spin_unlock(&ctx->cil->xc_push_lock);
 592	}
 593
 594	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
 595					ctx->start_lsn, abort);
 596
 597	xfs_extent_busy_sort(&ctx->busy_extents);
 598	xfs_extent_busy_clear(mp, &ctx->busy_extents,
 599			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
 600
 601	spin_lock(&ctx->cil->xc_push_lock);
 602	list_del(&ctx->committing);
 603	spin_unlock(&ctx->cil->xc_push_lock);
 604
 605	xlog_cil_free_logvec(ctx->lv_chain);
 606
 607	if (!list_empty(&ctx->busy_extents))
 608		xlog_discard_busy_extents(mp, ctx);
 609	else
 610		kmem_free(ctx);
 611}
 612
 613void
 614xlog_cil_process_committed(
 615	struct list_head	*list)
 616{
 617	struct xfs_cil_ctx	*ctx;
 618
 619	while ((ctx = list_first_entry_or_null(list,
 620			struct xfs_cil_ctx, iclog_entry))) {
 621		list_del(&ctx->iclog_entry);
 622		xlog_cil_committed(ctx);
 623	}
 624}
 625
 626/*
 627 * Push the Committed Item List to the log.
 628 *
 629 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 630 * xc_push_seq is less than the current sequence, then it has already been
 631 * flushed and we don't need to do anything - the caller will wait for it to
 632 * complete if necessary.
 633 *
 634 * xc_push_seq is checked unlocked against the sequence number for a match.
 635 * Hence we can allow log forces to run racily and not issue pushes for the
 636 * same sequence twice.  If we get a race between multiple pushes for the same
 637 * sequence they will block on the first one and then abort, hence avoiding
 638 * needless pushes.
 639 */
 640static void
 641xlog_cil_push_work(
 642	struct work_struct	*work)
 643{
 644	struct xfs_cil		*cil =
 645		container_of(work, struct xfs_cil, xc_push_work);
 646	struct xlog		*log = cil->xc_log;
 647	struct xfs_log_vec	*lv;
 648	struct xfs_cil_ctx	*ctx;
 649	struct xfs_cil_ctx	*new_ctx;
 650	struct xlog_in_core	*commit_iclog;
 651	struct xlog_ticket	*tic;
 652	int			num_iovecs;
 653	int			error = 0;
 654	struct xfs_trans_header thdr;
 655	struct xfs_log_iovec	lhdr;
 656	struct xfs_log_vec	lvhdr = { NULL };
 657	xfs_lsn_t		preflush_tail_lsn;
 658	xfs_lsn_t		commit_lsn;
 659	xfs_csn_t		push_seq;
 660	struct bio		bio;
 661	DECLARE_COMPLETION_ONSTACK(bdev_flush);
 662
 663	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
 664	new_ctx->ticket = xlog_cil_ticket_alloc(log);
 665
 666	down_write(&cil->xc_ctx_lock);
 667	ctx = cil->xc_ctx;
 668
 669	spin_lock(&cil->xc_push_lock);
 670	push_seq = cil->xc_push_seq;
 671	ASSERT(push_seq <= ctx->sequence);
 672
 673	/*
 674	 * As we are about to switch to a new, empty CIL context, we no longer
 675	 * need to throttle tasks on CIL space overruns. Wake any waiters that
 676	 * the hard push throttle may have caught so they can start committing
 677	 * to the new context. The ctx->xc_push_lock provides the serialisation
 678	 * necessary for safely using the lockless waitqueue_active() check in
 679	 * this context.
 680	 */
 681	if (waitqueue_active(&cil->xc_push_wait))
 682		wake_up_all(&cil->xc_push_wait);
 683
 684	/*
 685	 * Check if we've anything to push. If there is nothing, then we don't
 686	 * move on to a new sequence number and so we have to be able to push
 687	 * this sequence again later.
 688	 */
 689	if (list_empty(&cil->xc_cil)) {
 690		cil->xc_push_seq = 0;
 691		spin_unlock(&cil->xc_push_lock);
 692		goto out_skip;
 693	}
 694
 695
 696	/* check for a previously pushed sequence */
 697	if (push_seq < cil->xc_ctx->sequence) {
 698		spin_unlock(&cil->xc_push_lock);
 699		goto out_skip;
 700	}
 701
 702	/*
 703	 * We are now going to push this context, so add it to the committing
 704	 * list before we do anything else. This ensures that anyone waiting on
 705	 * this push can easily detect the difference between a "push in
 706	 * progress" and "CIL is empty, nothing to do".
 707	 *
 708	 * IOWs, a wait loop can now check for:
 709	 *	the current sequence not being found on the committing list;
 710	 *	an empty CIL; and
 711	 *	an unchanged sequence number
 712	 * to detect a push that had nothing to do and therefore does not need
 713	 * waiting on. If the CIL is not empty, we get put on the committing
 714	 * list before emptying the CIL and bumping the sequence number. Hence
 715	 * an empty CIL and an unchanged sequence number means we jumped out
 716	 * above after doing nothing.
 717	 *
 718	 * Hence the waiter will either find the commit sequence on the
 719	 * committing list or the sequence number will be unchanged and the CIL
 720	 * still dirty. In that latter case, the push has not yet started, and
 721	 * so the waiter will have to continue trying to check the CIL
 722	 * committing list until it is found. In extreme cases of delay, the
 723	 * sequence may fully commit between the waiter's attempts to wait
 724	 * on the commit sequence.
 725	 */
 726	list_add(&ctx->committing, &cil->xc_committing);
 727	spin_unlock(&cil->xc_push_lock);
 728
 729	/*
 730	 * The CIL is stable at this point - nothing new will be added to it
 731	 * because we hold the flush lock exclusively. Hence we can now issue
 732	 * a cache flush to ensure all the completed metadata in the journal we
 733	 * are about to overwrite is on stable storage.
 734	 *
 735	 * Because we are issuing this cache flush before we've written the
 736	 * tail lsn to the iclog, we can have metadata IO completions move the
 737	 * tail forwards between the completion of this flush and the iclog
 738	 * being written. In this case, we need to re-issue the cache flush
 739	 * before the iclog write. To detect whether the log tail moves, sample
 740	 * the tail LSN *before* we issue the flush.
 741	 */
 742	preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
 743	xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
 744				&bdev_flush);
 745
 746	/*
 747	 * Pull all the log vectors off the items in the CIL, and remove the
 748	 * items from the CIL. We don't need the CIL lock here because it's only
 749	 * needed on the transaction commit side which is currently locked out
 750	 * by the flush lock.
 751	 */
 752	lv = NULL;
 753	num_iovecs = 0;
 754	while (!list_empty(&cil->xc_cil)) {
 755		struct xfs_log_item	*item;
 756
 757		item = list_first_entry(&cil->xc_cil,
 758					struct xfs_log_item, li_cil);
 759		list_del_init(&item->li_cil);
 760		if (!ctx->lv_chain)
 761			ctx->lv_chain = item->li_lv;
 762		else
 763			lv->lv_next = item->li_lv;
 764		lv = item->li_lv;
 765		item->li_lv = NULL;
 766		num_iovecs += lv->lv_niovecs;
 767	}
 768
 769	/*
 770	 * initialise the new context and attach it to the CIL. Then attach
 771	 * the current context to the CIL committing list so it can be found
 772	 * during log forces to extract the commit lsn of the sequence that
 773	 * needs to be forced.
 774	 */
 775	INIT_LIST_HEAD(&new_ctx->committing);
 776	INIT_LIST_HEAD(&new_ctx->busy_extents);
 777	new_ctx->sequence = ctx->sequence + 1;
 778	new_ctx->cil = cil;
 779	cil->xc_ctx = new_ctx;
 780
 781	/*
 782	 * The switch is now done, so we can drop the context lock and move out
 783	 * of a shared context. We can't just go straight to the commit record,
 784	 * though - we need to synchronise with previous and future commits so
 785	 * that the commit records are correctly ordered in the log to ensure
 786	 * that we process items during log IO completion in the correct order.
 787	 *
 788	 * For example, if we get an EFI in one checkpoint and the EFD in the
 789	 * next (e.g. due to log forces), we do not want the checkpoint with
 790	 * the EFD to be committed before the checkpoint with the EFI.  Hence
 791	 * we must strictly order the commit records of the checkpoints so
 792	 * that: a) the checkpoint callbacks are attached to the iclogs in the
 793	 * correct order; and b) the checkpoints are replayed in correct order
 794	 * in log recovery.
 795	 *
 796	 * Hence we need to add this context to the committing context list so
 797	 * that higher sequences will wait for us to write out a commit record
 798	 * before they do.
 799	 *
 800	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
 801	 * structure atomically with the addition of this sequence to the
 802	 * committing list. This also ensures that we can do unlocked checks
 803	 * against the current sequence in log forces without risking
 804	 * dereferencing a freed context pointer.
 805	 */
 806	spin_lock(&cil->xc_push_lock);
 807	cil->xc_current_sequence = new_ctx->sequence;
 808	spin_unlock(&cil->xc_push_lock);
 809	up_write(&cil->xc_ctx_lock);
 810
 811	/*
 812	 * Build a checkpoint transaction header and write it to the log to
 813	 * begin the transaction. We need to account for the space used by the
 814	 * transaction header here as it is not accounted for in xlog_write().
 815	 *
 816	 * The LSN we need to pass to the log items on transaction commit is
 817	 * the LSN reported by the first log vector write. If we use the commit
 818	 * record lsn then we can move the tail beyond the grant write head.
 819	 */
 820	tic = ctx->ticket;
 821	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
 822	thdr.th_type = XFS_TRANS_CHECKPOINT;
 823	thdr.th_tid = tic->t_tid;
 824	thdr.th_num_items = num_iovecs;
 825	lhdr.i_addr = &thdr;
 826	lhdr.i_len = sizeof(xfs_trans_header_t);
 827	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
 828	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);
 829
 830	lvhdr.lv_niovecs = 1;
 831	lvhdr.lv_iovecp = &lhdr;
 832	lvhdr.lv_next = ctx->lv_chain;
 833
 834	/*
 835	 * Before we format and submit the first iclog, we have to ensure that
 836	 * the metadata writeback ordering cache flush is complete.
 837	 */
 838	wait_for_completion(&bdev_flush);
 839
 840	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL,
 841				XLOG_START_TRANS);
 842	if (error)
 843		goto out_abort_free_ticket;
 844
 845	/*
 846	 * now that we've written the checkpoint into the log, strictly
 847	 * order the commit records so replay will get them in the right order.
 848	 */
 849restart:
 850	spin_lock(&cil->xc_push_lock);
 851	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
 852		/*
 853		 * Avoid getting stuck in this loop because we were woken by the
 854		 * shutdown, but then went back to sleep once already in the
 855		 * shutdown state.
 856		 */
 857		if (XLOG_FORCED_SHUTDOWN(log)) {
 858			spin_unlock(&cil->xc_push_lock);
 859			goto out_abort_free_ticket;
 860		}
 861
 862		/*
 863		 * Higher sequences will wait for this one so skip them.
 864		 * Don't wait for our own sequence, either.
 865		 */
 866		if (new_ctx->sequence >= ctx->sequence)
 867			continue;
 868		if (!new_ctx->commit_lsn) {
 869			/*
 870			 * It is still being pushed! Wait for the push to
 871			 * complete, then start again from the beginning.
 872			 */
 873			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
 874			goto restart;
 875		}
 876	}
 877	spin_unlock(&cil->xc_push_lock);
 878
 879	error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn);
 880	if (error)
 881		goto out_abort_free_ticket;
 882
 883	xfs_log_ticket_ungrant(log, tic);
 884
 885	/*
 886	 * Once we attach the ctx to the iclog, a shutdown can process the
 887	 * iclog, run the callbacks and free the ctx. The only thing preventing
 888	 * this potential UAF situation here is that we are holding the
 889	 * icloglock. Hence we cannot access the ctx once we have attached the
 890	 * callbacks and dropped the icloglock.
 891	 */
 892	spin_lock(&log->l_icloglock);
 893	if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
 894		spin_unlock(&log->l_icloglock);
 895		goto out_abort;
 896	}
 897	ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
 898		      commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
 899	list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
 900
 901	/*
 902	 * now the checkpoint commit is complete and we've attached the
 903	 * callbacks to the iclog we can assign the commit LSN to the context
 904	 * and wake up anyone who is waiting for the commit to complete.
 905	 */
 906	spin_lock(&cil->xc_push_lock);
 907	ctx->commit_lsn = commit_lsn;
 908	wake_up_all(&cil->xc_commit_wait);
 909	spin_unlock(&cil->xc_push_lock);
 910
 911	/*
 912	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
 913	 * to complete before we submit the commit_iclog. We can't use state
 914	 * checks for this - ACTIVE can be either a past completed iclog or a
 915	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
 916	 * past or future iclog awaiting IO or ordered IO completion to be run.
 917	 * In the latter case, if it's a future iclog and we wait on it, then we
 918	 * will hang because it won't get processed through to ic_force_wait
 919	 * wakeup until this commit_iclog is written to disk.  Hence we use the
 920	 * iclog header lsn and compare it to the commit lsn to determine if we
 921	 * need to wait on iclogs or not.
 922	 *
 923	 * NOTE: It is not safe to reference the ctx after this check as we drop
 924	 * the icloglock if we have to wait for completion of other iclogs.
 925	 */
 926	if (ctx->start_lsn != commit_lsn) {
 927		xfs_lsn_t	plsn;
 928
 929		plsn = be64_to_cpu(commit_iclog->ic_prev->ic_header.h_lsn);
 930		if (plsn && XFS_LSN_CMP(plsn, commit_lsn) < 0) {
 931			/*
 932			 * Waiting on ic_force_wait orders the completion of
 933			 * iclogs older than ic_prev. Hence we only need to wait
 934			 * on the most recent older iclog here.
 935			 */
 936			xlog_wait_on_iclog(commit_iclog->ic_prev);
 937			spin_lock(&log->l_icloglock);
 938		}
 939
 940		/*
 941		 * We need to issue a pre-flush so that the ordering for this
 942		 * checkpoint is correctly preserved down to stable storage.
 943		 */
 944		commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
 945	}
 946
 947	/*
 948	 * The commit iclog must be written to stable storage to guarantee
 949	 * journal IO vs metadata writeback IO is correctly ordered on stable
 950	 * storage.
 951	 */
 952	commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
 953	xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
 954	spin_unlock(&log->l_icloglock);
 955	return;
 956
 957out_skip:
 958	up_write(&cil->xc_ctx_lock);
 959	xfs_log_ticket_put(new_ctx->ticket);
 960	kmem_free(new_ctx);
 961	return;
 962
 963out_abort_free_ticket:
 964	xfs_log_ticket_ungrant(log, tic);
 965out_abort:
 966	ASSERT(XLOG_FORCED_SHUTDOWN(log));
 967	xlog_cil_committed(ctx);
 968}
 969
 970/*
 971 * We need to push CIL every so often so we don't cache more than we can fit in
 972 * the log. The limit really is that a checkpoint can't be more than half the
 973 * log (the current checkpoint is not allowed to overwrite the previous
 974 * checkpoint), but commit latency and memory usage limit this to a smaller
 975 * size.
 976 */
 977static void
 978xlog_cil_push_background(
 979	struct xlog	*log) __releases(cil->xc_ctx_lock)
 980{
 981	struct xfs_cil	*cil = log->l_cilp;
 982
 983	/*
 984	 * The cil won't be empty because we are called while holding the
 985	 * context lock so whatever we added to the CIL will still be there
 986	 */
 987	ASSERT(!list_empty(&cil->xc_cil));
 988
 989	/*
 990	 * Don't do a background push if we haven't used up all the
 991	 * space available yet.
 992	 */
 993	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
 994		up_read(&cil->xc_ctx_lock);
 995		return;
 996	}
 997
 998	spin_lock(&cil->xc_push_lock);
 999	if (cil->xc_push_seq < cil->xc_current_sequence) {
1000		cil->xc_push_seq = cil->xc_current_sequence;
1001		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
1002	}
1003
1004	/*
1005	 * Drop the context lock now, we can't hold that if we need to sleep
1006	 * because we are over the blocking threshold. The push_lock is still
1007	 * held, so blocking threshold sleep/wakeup is still correctly
1008	 * serialised here.
1009	 */
1010	up_read(&cil->xc_ctx_lock);
1011
1012	/*
1013	 * If we are well over the space limit, throttle the work that is being
1014	 * done until the push work on this context has begun. Enforce the hard
1015	 * throttle on all transaction commits once it has been activated, even
1016	 * if the committing transactions have resulted in the space usage
1017	 * dipping back down under the hard limit.
1018	 *
1019	 * The ctx->xc_push_lock provides the serialisation necessary for safely
1020	 * using the lockless waitqueue_active() check in this context.
1021	 */
1022	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
1023	    waitqueue_active(&cil->xc_push_wait)) {
1024		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1025		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
1026		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1027		return;
1028	}
1029
1030	spin_unlock(&cil->xc_push_lock);
1031
1032}
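
/*
 * A toy user-space illustration of the two-threshold policy above, using
 * made-up fractions (the real XLOG_CIL_SPACE_LIMIT() and
 * XLOG_CIL_BLOCKING_SPACE_LIMIT() definitions live in xfs_log_priv.h and
 * may differ): the lower limit queues background push work, the higher one
 * throttles committers until the push begins.
 */
#include <stdio.h>

int main(void)
{
	long logsize = 64L << 20;	/* 64 MiB log (example) */
	long bg_limit = logsize / 8;	/* assumed background threshold */
	long hard_limit = logsize / 4;	/* assumed blocking threshold */
	long used = 20L << 20;		/* current checkpoint size */

	if (used >= hard_limit)
		printf("sleep until the push work has started\n");
	else if (used >= bg_limit)
		printf("queue a background CIL push\n");
	else
		printf("keep aggregating in memory\n");
	return 0;
}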
1033
1034/*
1035 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1036 * number that is passed. When it returns, the work will be queued for
1037 * @push_seq, but it won't be completed. The caller is expected to do any
1038 * waiting for push_seq to complete if it is required.
1039 */
1040static void
1041xlog_cil_push_now(
1042	struct xlog	*log,
1043	xfs_lsn_t	push_seq)
1044{
1045	struct xfs_cil	*cil = log->l_cilp;
1046
1047	if (!cil)
1048		return;
1049
1050	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1051
1052	/* start on any pending background push to minimise wait time on it */
1053	flush_work(&cil->xc_push_work);
1054
1055	/*
1056	 * If the CIL is empty or we've already pushed the sequence then
1057	 * there's no work we need to do.
1058	 */
1059	spin_lock(&cil->xc_push_lock);
1060	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
1061		spin_unlock(&cil->xc_push_lock);
1062		return;
1063	}
1064
1065	cil->xc_push_seq = push_seq;
1066	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
1067	spin_unlock(&cil->xc_push_lock);
1068}
1069
1070bool
1071xlog_cil_empty(
1072	struct xlog	*log)
1073{
1074	struct xfs_cil	*cil = log->l_cilp;
1075	bool		empty = false;
1076
1077	spin_lock(&cil->xc_push_lock);
1078	if (list_empty(&cil->xc_cil))
1079		empty = true;
1080	spin_unlock(&cil->xc_push_lock);
1081	return empty;
1082}
1083
1084/*
1085 * Commit a transaction with the given vector to the Committed Item List.
1086 *
1087 * To do this, we need to format the item, pin it in memory if required and
1088 * account for the space used by the transaction. Once we have done that we
1089 * need to release the unused reservation for the transaction, attach the
1090 * transaction to the checkpoint context so we carry the busy extents through
1091 * to checkpoint completion, and then unlock all the items in the transaction.
1092 *
1093 * Called with the context lock already held in read mode to lock out
1094 * background commit, returns without it held once background commits are
1095 * allowed again.
1096 */
1097void
1098xlog_cil_commit(
1099	struct xlog		*log,
1100	struct xfs_trans	*tp,
1101	xfs_csn_t		*commit_seq,
1102	bool			regrant)
1103{
1104	struct xfs_cil		*cil = log->l_cilp;
1105	struct xfs_log_item	*lip, *next;
1106
1107	/*
1108	 * Do all necessary memory allocation before we lock the CIL.
1109	 * This ensures the allocation does not deadlock with a CIL
1110	 * push in memory reclaim (e.g. from kswapd).
1111	 */
1112	xlog_cil_alloc_shadow_bufs(log, tp);
1113
1114	/* lock out background commit */
1115	down_read(&cil->xc_ctx_lock);
1116
1117	xlog_cil_insert_items(log, tp);
1118
1119	if (regrant && !XLOG_FORCED_SHUTDOWN(log))
1120		xfs_log_ticket_regrant(log, tp->t_ticket);
1121	else
1122		xfs_log_ticket_ungrant(log, tp->t_ticket);
1123	tp->t_ticket = NULL;
1124	xfs_trans_unreserve_and_mod_sb(tp);
1125
1126	/*
1127	 * Once all the items of the transaction have been copied to the CIL,
1128	 * the items can be unlocked and possibly freed.
1129	 *
1130	 * This needs to be done before we drop the CIL context lock because we
1131	 * have to update state in the log items and unlock them before they go
1132	 * to disk. If we don't, then the CIL checkpoint can race with us and
1133	 * we can run checkpoint completion before we've updated and unlocked
1134	 * the log items. This affects (at least) processing of stale buffers,
1135	 * inodes and EFIs.
1136	 */
1137	trace_xfs_trans_commit_items(tp, _RET_IP_);
1138	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1139		xfs_trans_del_item(lip);
1140		if (lip->li_ops->iop_committing)
1141			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1142	}
1143	if (commit_seq)
1144		*commit_seq = cil->xc_ctx->sequence;
1145
1146	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
1147	xlog_cil_push_background(log);
1148}
1149
1150/*
1151 * Conditionally push the CIL based on the sequence passed in.
1152 *
1153 * We only need to push if we haven't already pushed the sequence
1154 * number given. Hence the only time we will trigger a push here is
1155 * if the push sequence is the same as the current context.
1156 *
1157 * We return the current commit lsn to allow the callers to determine if an
1158 * iclog flush is necessary following this call.
1159 */
1160xfs_lsn_t
1161xlog_cil_force_seq(
1162	struct xlog	*log,
1163	xfs_csn_t	sequence)
1164{
1165	struct xfs_cil		*cil = log->l_cilp;
1166	struct xfs_cil_ctx	*ctx;
1167	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;
1168
1169	ASSERT(sequence <= cil->xc_current_sequence);
1170
1171	/*
1172	 * check to see if we need to force out the current context.
1173	 * xlog_cil_push() handles racing pushes for the same sequence,
1174	 * so no need to deal with it here.
1175	 */
1176restart:
1177	xlog_cil_push_now(log, sequence);
1178
1179	/*
1180	 * See if we can find a previous sequence still committing.
1181	 * We need to wait for all previous sequence commits to complete
1182	 * before allowing the force of push_seq to go ahead. Hence block
1183	 * on commits for those as well.
1184	 */
1185	spin_lock(&cil->xc_push_lock);
1186	list_for_each_entry(ctx, &cil->xc_committing, committing) {
1187		/*
1188		 * Avoid getting stuck in this loop because we were woken by the
1189		 * shutdown, but then went back to sleep once already in the
1190		 * shutdown state.
1191		 */
1192		if (XLOG_FORCED_SHUTDOWN(log))
1193			goto out_shutdown;
1194		if (ctx->sequence > sequence)
1195			continue;
1196		if (!ctx->commit_lsn) {
1197			/*
1198			 * It is still being pushed! Wait for the push to
1199			 * complete, then start again from the beginning.
1200			 */
1201			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1202			goto restart;
1203		}
1204		if (ctx->sequence != sequence)
1205			continue;
1206		/* found it! */
1207		commit_lsn = ctx->commit_lsn;
1208	}
1209
1210	/*
1211	 * The call to xlog_cil_push_now() executes the push in the background.
1212 * Hence by the time we have got here our sequence may not have been
1213	 * pushed yet. This is true if the current sequence still matches the
1214	 * push sequence after the above wait loop and the CIL still contains
1215	 * dirty objects. This is guaranteed by the push code first adding the
1216	 * context to the committing list before emptying the CIL.
1217	 *
1218	 * Hence if we don't find the context in the committing list and the
1219	 * current sequence number is unchanged then the CIL contents are
1220 * significant.  If the CIL is empty, it means there was nothing to push
1221	 * and that means there is nothing to wait for. If the CIL is not empty,
1222	 * it means we haven't yet started the push, because if it had started
1223	 * we would have found the context on the committing list.
1224	 */
1225	if (sequence == cil->xc_current_sequence &&
1226	    !list_empty(&cil->xc_cil)) {
1227		spin_unlock(&cil->xc_push_lock);
1228		goto restart;
1229	}
1230
1231	spin_unlock(&cil->xc_push_lock);
1232	return commit_lsn;
1233
1234	/*
1235	 * We detected a shutdown in progress. We need to trigger the log force
1236 * to pass through its iclog state machine error handling, even though
1237	 * we are already in a shutdown state. Hence we can't return
1238	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1239	 * LSN is already stable), so we return a zero LSN instead.
1240	 */
1241out_shutdown:
1242	spin_unlock(&cil->xc_push_lock);
1243	return 0;
1244}
1245
1246/*
1247 * Check if the current log item was first committed in this sequence.
1248 * We can't rely on just the log item being in the CIL, we have to check
1249 * the recorded commit sequence number.
1250 *
1251 * Note: for this to be used in a non-racy manner, it has to be called with
1252 * CIL flushing locked out. As a result, it should only be used during the
1253 * transaction commit process when deciding what to format into the item.
1254 */
1255bool
1256xfs_log_item_in_current_chkpt(
1257	struct xfs_log_item *lip)
1258{
1259	struct xfs_cil_ctx *ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
1260
1261	if (list_empty(&lip->li_cil))
1262		return false;
1263
1264	/*
1265	 * li_seq is written on the first commit of a log item to record the
1266	 * first checkpoint it is written to. Hence if it is different to the
1267	 * current sequence, we're in a new checkpoint.
1268	 */
1269	return lip->li_seq == ctx->sequence;
1270}
1271
1272/*
1273 * Perform initial CIL structure initialisation.
1274 */
1275int
1276xlog_cil_init(
1277	struct xlog	*log)
1278{
1279	struct xfs_cil	*cil;
1280	struct xfs_cil_ctx *ctx;
1281
1282	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
1283	if (!cil)
1284		return -ENOMEM;
1285
1286	ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
1287	if (!ctx) {
1288		kmem_free(cil);
1289		return -ENOMEM;
1290	}
1291
1292	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
1293	INIT_LIST_HEAD(&cil->xc_cil);
1294	INIT_LIST_HEAD(&cil->xc_committing);
1295	spin_lock_init(&cil->xc_cil_lock);
1296	spin_lock_init(&cil->xc_push_lock);
1297	init_waitqueue_head(&cil->xc_push_wait);
1298	init_rwsem(&cil->xc_ctx_lock);
1299	init_waitqueue_head(&cil->xc_commit_wait);
1300
1301	INIT_LIST_HEAD(&ctx->committing);
1302	INIT_LIST_HEAD(&ctx->busy_extents);
1303	ctx->sequence = 1;
1304	ctx->cil = cil;
1305	cil->xc_ctx = ctx;
1306	cil->xc_current_sequence = ctx->sequence;
1307
1308	cil->xc_log = log;
1309	log->l_cilp = cil;
1310	return 0;
1311}
1312
1313void
1314xlog_cil_destroy(
1315	struct xlog	*log)
1316{
1317	if (log->l_cilp->xc_ctx) {
1318		if (log->l_cilp->xc_ctx->ticket)
1319			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
1320		kmem_free(log->l_cilp->xc_ctx);
1321	}
1322
1323	ASSERT(list_empty(&log->l_cilp->xc_cil));
1324	kmem_free(log->l_cilp);
1325}
1326