   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_errortag.h"
  14#include "xfs_error.h"
  15#include "xfs_trans.h"
  16#include "xfs_trans_priv.h"
  17#include "xfs_log.h"
  18#include "xfs_log_priv.h"
  19#include "xfs_trace.h"
  20#include "xfs_sysfs.h"
  21#include "xfs_sb.h"
  22#include "xfs_health.h"
  23
  24struct kmem_cache	*xfs_log_ticket_cache;
  25
  26/* Local miscellaneous function prototypes */
  27STATIC struct xlog *
  28xlog_alloc_log(
  29	struct xfs_mount	*mp,
  30	struct xfs_buftarg	*log_target,
  31	xfs_daddr_t		blk_offset,
  32	int			num_bblks);
  33STATIC int
  34xlog_space_left(
  35	struct xlog		*log,
  36	atomic64_t		*head);
  37STATIC void
  38xlog_dealloc_log(
  39	struct xlog		*log);
  40
  41/* local state machine functions */
  42STATIC void xlog_state_done_syncing(
  43	struct xlog_in_core	*iclog);
  44STATIC void xlog_state_do_callback(
  45	struct xlog		*log);
  46STATIC int
  47xlog_state_get_iclog_space(
  48	struct xlog		*log,
  49	int			len,
  50	struct xlog_in_core	**iclog,
  51	struct xlog_ticket	*ticket,
  52	int			*logoffsetp);
  53STATIC void
  54xlog_grant_push_ail(
  55	struct xlog		*log,
  56	int			need_bytes);
  57STATIC void
  58xlog_sync(
  59	struct xlog		*log,
  60	struct xlog_in_core	*iclog,
  61	struct xlog_ticket	*ticket);
  62#if defined(DEBUG)
  63STATIC void
  64xlog_verify_grant_tail(
  65	struct xlog *log);
  66STATIC void
  67xlog_verify_iclog(
  68	struct xlog		*log,
  69	struct xlog_in_core	*iclog,
  70	int			count);
  71STATIC void
  72xlog_verify_tail_lsn(
  73	struct xlog		*log,
  74	struct xlog_in_core	*iclog);
  75#else
  76#define xlog_verify_grant_tail(a)
  77#define xlog_verify_iclog(a,b,c)
  78#define xlog_verify_tail_lsn(a,b)
  79#endif
  80
  81STATIC int
  82xlog_iclogs_empty(
  83	struct xlog		*log);
  84
  85static int
  86xfs_log_cover(struct xfs_mount *);
  87
  88/*
  89 * We need to make sure the buffer pointer returned is naturally aligned for the
  90 * biggest basic data type we put into it. We have already accounted for this
  91 * padding when sizing the buffer.
  92 *
  93 * However, this padding does not get written into the log, and hence we have to
  94 * track the space used by the log vectors separately to prevent log space hangs
  95 * due to inaccurate accounting (i.e. a leak) of the used log space through the
  96 * CIL context ticket.
  97 *
  98 * We also add space for the xlog_op_header that describes this region in the
  99 * log. This prepends the data region we return to the caller to copy their data
 100 * into, so do all the static initialisation of the ophdr now. Because the ophdr
 101 * is not 8 byte aligned, we have to be careful to ensure that we align the
 102 * start of the buffer such that the region we return to the caller is 8 byte
 103 * aligned and packed against the tail of the ophdr.
 104 */
 105void *
 106xlog_prepare_iovec(
 107	struct xfs_log_vec	*lv,
 108	struct xfs_log_iovec	**vecp,
 109	uint			type)
 110{
 111	struct xfs_log_iovec	*vec = *vecp;
 112	struct xlog_op_header	*oph;
 113	uint32_t		len;
 114	void			*buf;
 115
 116	if (vec) {
 117		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
 118		vec++;
 119	} else {
 120		vec = &lv->lv_iovecp[0];
 121	}
 122
 123	len = lv->lv_buf_len + sizeof(struct xlog_op_header);
 124	if (!IS_ALIGNED(len, sizeof(uint64_t))) {
 125		lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
 126					sizeof(struct xlog_op_header);
 127	}
 128
 129	vec->i_type = type;
 130	vec->i_addr = lv->lv_buf + lv->lv_buf_len;
 131
 132	oph = vec->i_addr;
 133	oph->oh_clientid = XFS_TRANSACTION;
 134	oph->oh_res2 = 0;
 135	oph->oh_flags = 0;
 136
 137	buf = vec->i_addr + sizeof(struct xlog_op_header);
 138	ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
 139
 140	*vecp = vec;
 141	return buf;
 142}
 143
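/*
 * Worked example of the alignment rule above (illustrative, not from the
 * original source): struct xlog_op_header is 12 bytes, so with
 * lv_buf_len == 0 we get len = 0 + 12 = 12, which is not 8 byte aligned,
 * and lv_buf_len is bumped to round_up(12, 8) - 12 = 4. The ophdr is then
 * written at offset 4 and the buffer handed back to the caller starts at
 * offset 4 + 12 = 16, satisfying the IS_ALIGNED() assert above.
 */
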
 144static void
 145xlog_grant_sub_space(
 146	struct xlog		*log,
 147	atomic64_t		*head,
 148	int			bytes)
 149{
 150	int64_t	head_val = atomic64_read(head);
 151	int64_t new, old;
 152
 153	do {
 154		int	cycle, space;
 155
 156		xlog_crack_grant_head_val(head_val, &cycle, &space);
 157
 158		space -= bytes;
 159		if (space < 0) {
 160			space += log->l_logsize;
 161			cycle--;
 162		}
 163
 164		old = head_val;
 165		new = xlog_assign_grant_head_val(cycle, space);
 166		head_val = atomic64_cmpxchg(head, old, new);
 167	} while (head_val != old);
 168}
 169
 170static void
 171xlog_grant_add_space(
 172	struct xlog		*log,
 173	atomic64_t		*head,
 174	int			bytes)
 175{
 176	int64_t	head_val = atomic64_read(head);
 177	int64_t new, old;
 178
 179	do {
 180		int		tmp;
 181		int		cycle, space;
 182
 183		xlog_crack_grant_head_val(head_val, &cycle, &space);
 184
 185		tmp = log->l_logsize - space;
 186		if (tmp > bytes)
 187			space += bytes;
 188		else {
 189			space = bytes - tmp;
 190			cycle++;
 191		}
 192
 193		old = head_val;
 194		new = xlog_assign_grant_head_val(cycle, space);
 195		head_val = atomic64_cmpxchg(head, old, new);
 196	} while (head_val != old);
 197}
 198
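/*
 * Sketch of the wrap arithmetic in the two helpers above, with assumed
 * numbers: on a 16 MB log (l_logsize = 16777216), adding bytes = 1048576
 * while space sits 524288 bytes short of the end gives tmp = 524288 < bytes,
 * so the head wraps to space = 1048576 - 524288 = 524288 in the next cycle
 * and cycle is incremented. xlog_grant_sub_space() reverses this: if space
 * goes negative, l_logsize is added back and cycle is decremented.
 */
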
 199STATIC void
 200xlog_grant_head_init(
 201	struct xlog_grant_head	*head)
 202{
 203	xlog_assign_grant_head(&head->grant, 1, 0);
 204	INIT_LIST_HEAD(&head->waiters);
 205	spin_lock_init(&head->lock);
 206}
 207
 208STATIC void
 209xlog_grant_head_wake_all(
 210	struct xlog_grant_head	*head)
 211{
 212	struct xlog_ticket	*tic;
 213
 214	spin_lock(&head->lock);
 215	list_for_each_entry(tic, &head->waiters, t_queue)
 216		wake_up_process(tic->t_task);
 217	spin_unlock(&head->lock);
 218}
 219
 220static inline int
 221xlog_ticket_reservation(
 222	struct xlog		*log,
 223	struct xlog_grant_head	*head,
 224	struct xlog_ticket	*tic)
 225{
 226	if (head == &log->l_write_head) {
 227		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 228		return tic->t_unit_res;
 229	}
 230
 231	if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 232		return tic->t_unit_res * tic->t_cnt;
 233
 234	return tic->t_unit_res;
 235}
 236
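/*
 * Example of the reservation sizes returned above (illustrative numbers):
 * a permanent ticket with t_unit_res = 100 kB and t_cnt = 2 needs 200 kB
 * from the reserve head, as both transaction rolls are paid up front, but
 * only 100 kB from the write head, which xfs_log_regrant() tops back up
 * one unit at a time as the transaction rolls.
 */
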
 237STATIC bool
 238xlog_grant_head_wake(
 239	struct xlog		*log,
 240	struct xlog_grant_head	*head,
 241	int			*free_bytes)
 242{
 243	struct xlog_ticket	*tic;
 244	int			need_bytes;
 245	bool			woken_task = false;
 246
 247	list_for_each_entry(tic, &head->waiters, t_queue) {
 248
 249		/*
 250		 * There is a chance that the size of the CIL checkpoints in
 251		 * progress at the last AIL push target calculation resulted in
 252		 * limiting the target to the log head (l_last_sync_lsn) at the
 253		 * time. This may not reflect where the log head is now as the
 254		 * CIL checkpoints may have completed.
 255		 *
 256 * Hence when we are woken here, it may be the head of the
 257		 * log that has moved rather than the tail. As the tail didn't
 258		 * move, there still won't be space available for the
 259		 * reservation we require.  However, if the AIL has already
 260		 * pushed to the target defined by the old log head location, we
 261		 * will hang here waiting for something else to update the AIL
 262		 * push target.
 263		 *
 264		 * Therefore, if there isn't space to wake the first waiter on
 265		 * the grant head, we need to push the AIL again to ensure the
 266		 * target reflects both the current log tail and log head
 267		 * position before we wait for the tail to move again.
 268		 */
 269
 270		need_bytes = xlog_ticket_reservation(log, head, tic);
 271		if (*free_bytes < need_bytes) {
 272			if (!woken_task)
 273				xlog_grant_push_ail(log, need_bytes);
 274			return false;
 275		}
 276
 277		*free_bytes -= need_bytes;
 278		trace_xfs_log_grant_wake_up(log, tic);
 279		wake_up_process(tic->t_task);
 280		woken_task = true;
 281	}
 282
 283	return true;
 284}
 285
 286STATIC int
 287xlog_grant_head_wait(
 288	struct xlog		*log,
 289	struct xlog_grant_head	*head,
 290	struct xlog_ticket	*tic,
 291	int			need_bytes) __releases(&head->lock)
 292					    __acquires(&head->lock)
 293{
 294	list_add_tail(&tic->t_queue, &head->waiters);
 295
 296	do {
 297		if (xlog_is_shutdown(log))
 298			goto shutdown;
 299		xlog_grant_push_ail(log, need_bytes);
 300
 301		__set_current_state(TASK_UNINTERRUPTIBLE);
 302		spin_unlock(&head->lock);
 303
 304		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
 305
 306		trace_xfs_log_grant_sleep(log, tic);
 307		schedule();
 308		trace_xfs_log_grant_wake(log, tic);
 309
 310		spin_lock(&head->lock);
 311		if (xlog_is_shutdown(log))
 312			goto shutdown;
 313	} while (xlog_space_left(log, &head->grant) < need_bytes);
 314
 315	list_del_init(&tic->t_queue);
 316	return 0;
 317shutdown:
 318	list_del_init(&tic->t_queue);
 319	return -EIO;
 320}
 321
 322/*
 323 * Atomically get the log space required for a log ticket.
 324 *
 325 * Once a ticket gets put onto head->waiters, it will only return after the
 326 * needed reservation is satisfied.
 327 *
 328 * This function is structured so that it has a lock free fast path. This is
 329 * necessary because every new transaction reservation will come through this
 330 * path. Hence any lock will be globally hot if we take it unconditionally on
 331 * every pass.
 332 *
 333 * As tickets are only ever moved on and off head->waiters under head->lock, we
 334 * only need to take that lock if we are going to add the ticket to the queue
 335 * and sleep. We can avoid taking the lock if the ticket was never added to
 336 * head->waiters because the t_queue list head will be empty and we hold the
 337 * only reference to it so it can safely be checked unlocked.
 338 */
 339STATIC int
 340xlog_grant_head_check(
 341	struct xlog		*log,
 342	struct xlog_grant_head	*head,
 343	struct xlog_ticket	*tic,
 344	int			*need_bytes)
 345{
 346	int			free_bytes;
 347	int			error = 0;
 348
 349	ASSERT(!xlog_in_recovery(log));
 350
 351	/*
 352	 * If there are other waiters on the queue then give them a chance at
 353 * logspace before us.  Wake up the first waiters; if we do not wake
 354	 * up all the waiters then go to sleep waiting for more free space,
 355	 * otherwise try to get some space for this transaction.
 356	 */
 357	*need_bytes = xlog_ticket_reservation(log, head, tic);
 358	free_bytes = xlog_space_left(log, &head->grant);
 359	if (!list_empty_careful(&head->waiters)) {
 360		spin_lock(&head->lock);
 361		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
 362		    free_bytes < *need_bytes) {
 363			error = xlog_grant_head_wait(log, head, tic,
 364						     *need_bytes);
 365		}
 366		spin_unlock(&head->lock);
 367	} else if (free_bytes < *need_bytes) {
 368		spin_lock(&head->lock);
 369		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
 370		spin_unlock(&head->lock);
 371	}
 372
 373	return error;
 374}
 375
 376bool
 377xfs_log_writable(
 378	struct xfs_mount	*mp)
 379{
 380	/*
 381	 * Do not write to the log on norecovery mounts, if the data or log
 382	 * devices are read-only, or if the filesystem is shutdown. Read-only
 383	 * mounts allow internal writes for log recovery and unmount purposes,
 384	 * so don't restrict that case.
 385	 */
 386	if (xfs_has_norecovery(mp))
 387		return false;
 388	if (xfs_readonly_buftarg(mp->m_ddev_targp))
 389		return false;
 390	if (xfs_readonly_buftarg(mp->m_log->l_targ))
 391		return false;
 392	if (xlog_is_shutdown(mp->m_log))
 393		return false;
 394	return true;
 395}
 396
 397/*
 398 * Replenish the byte reservation required by moving the grant write head.
 399 */
 400int
 401xfs_log_regrant(
 402	struct xfs_mount	*mp,
 403	struct xlog_ticket	*tic)
 404{
 405	struct xlog		*log = mp->m_log;
 406	int			need_bytes;
 407	int			error = 0;
 408
 409	if (xlog_is_shutdown(log))
 410		return -EIO;
 411
 412	XFS_STATS_INC(mp, xs_try_logspace);
 413
 414	/*
 415	 * This is a new transaction on the ticket, so we need to change the
 416	 * transaction ID so that the next transaction has a different TID in
 417	 * the log. Just add one to the existing tid so that we can see chains
 418	 * of rolling transactions in the log easily.
 419	 */
 420	tic->t_tid++;
 421
 422	xlog_grant_push_ail(log, tic->t_unit_res);
 423
 424	tic->t_curr_res = tic->t_unit_res;
 425	if (tic->t_cnt > 0)
 426		return 0;
 427
 428	trace_xfs_log_regrant(log, tic);
 429
 430	error = xlog_grant_head_check(log, &log->l_write_head, tic,
 431				      &need_bytes);
 432	if (error)
 433		goto out_error;
 434
 435	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 436	trace_xfs_log_regrant_exit(log, tic);
 437	xlog_verify_grant_tail(log);
 438	return 0;
 439
 440out_error:
 441	/*
 442	 * If we are failing, make sure the ticket doesn't have any current
 443	 * reservations.  We don't want to add this back when the ticket/
 444	 * transaction gets cancelled.
 445	 */
 446	tic->t_curr_res = 0;
 447	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 448	return error;
 449}
 450
 451/*
 452 * Reserve log space and return a ticket corresponding to the reservation.
 453 *
 454 * Each reservation is going to reserve extra space for a log record header.
 455 * When writes happen to the on-disk log, we don't subtract the length of the
 456 * log record header from any reservation.  By wasting space in each
 457 * reservation, we prevent over allocation problems.
 458 */
 459int
 460xfs_log_reserve(
 461	struct xfs_mount	*mp,
 462	int			unit_bytes,
 463	int			cnt,
 464	struct xlog_ticket	**ticp,
 465	bool			permanent)
 466{
 467	struct xlog		*log = mp->m_log;
 468	struct xlog_ticket	*tic;
 469	int			need_bytes;
 470	int			error = 0;
 471
 472	if (xlog_is_shutdown(log))
 473		return -EIO;
 474
 475	XFS_STATS_INC(mp, xs_try_logspace);
 476
 477	ASSERT(*ticp == NULL);
 478	tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
 479	*ticp = tic;
 480
 481	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
 482					    : tic->t_unit_res);
 483
 484	trace_xfs_log_reserve(log, tic);
 485
 486	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
 487				      &need_bytes);
 488	if (error)
 489		goto out_error;
 490
 491	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
 492	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 493	trace_xfs_log_reserve_exit(log, tic);
 494	xlog_verify_grant_tail(log);
 495	return 0;
 496
 497out_error:
 498	/*
 499	 * If we are failing, make sure the ticket doesn't have any current
 500	 * reservations.  We don't want to add this back when the ticket/
 501	 * transaction gets cancelled.
 502	 */
 503	tic->t_curr_res = 0;
 504	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 505	return error;
 506}
 507
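/*
 * Typical caller pattern for xfs_log_reserve()/xfs_log_regrant(), as a
 * sketch only (the real consumer is the transaction code, e.g.
 * xfs_trans_reserve(); unit_bytes stands for a size computed by the
 * xfs_log_calc_*() helpers and is a placeholder, not code from this file):
 *
 *	struct xlog_ticket *tic = NULL;
 *	int error;
 *
 *	error = xfs_log_reserve(mp, unit_bytes, 2, &tic, true);
 *	if (error)
 *		return error;
 *	...
 *	error = xfs_log_regrant(mp, tic);	(roll to the next transaction)
 */
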
 508/*
 509 * Run all the pending iclog callbacks and wake log force waiters and iclog
 510 * space waiters so they can process the newly set shutdown state. We really
 511 * don't care what order we process callbacks here because the log is shut down
 512 * and so state cannot change on disk anymore. However, we cannot wake waiters
 513 * until the callbacks have been processed because we may be in unmount and
 514 * we must ensure that all AIL operations the callbacks perform have completed
 515 * before we tear down the AIL.
 516 *
 517 * We avoid processing actively referenced iclogs so that we don't run callbacks
 518 * while the iclog owner might still be preparing the iclog for IO submission.
 519 * These will be caught by xlog_state_iclog_release() and call this function
 520 * again to process any callbacks that may have been added to that iclog.
 521 */
 522static void
 523xlog_state_shutdown_callbacks(
 524	struct xlog		*log)
 525{
 526	struct xlog_in_core	*iclog;
 527	LIST_HEAD(cb_list);
 528
 529	iclog = log->l_iclog;
 530	do {
 531		if (atomic_read(&iclog->ic_refcnt)) {
 532			/* Reference holder will re-run iclog callbacks. */
 533			continue;
 534		}
 535		list_splice_init(&iclog->ic_callbacks, &cb_list);
 536		spin_unlock(&log->l_icloglock);
 537
 538		xlog_cil_process_committed(&cb_list);
 539
 540		spin_lock(&log->l_icloglock);
 541		wake_up_all(&iclog->ic_write_wait);
 542		wake_up_all(&iclog->ic_force_wait);
 543	} while ((iclog = iclog->ic_next) != log->l_iclog);
 544
 545	wake_up_all(&log->l_flush_wait);
 546}
 547
 548/*
 549 * Flush iclog to disk if this is the last reference to the given iclog and
 550 * it is in the WANT_SYNC state.
 551 *
 552 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
 553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
 554 * written to stable storage, and implies that a commit record is contained
 555 * within the iclog. We need to ensure that the log tail does not move beyond
 556 * the tail that the first commit record in the iclog ordered against, otherwise
 557 * correct recovery of that checkpoint becomes dependent on future operations
 558 * performed on this iclog.
 559 *
 560 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 561 * current tail into the iclog. Once the iclog tail is set, future operations must
 562 * not modify it, otherwise they potentially violate ordering constraints for
 563 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 564 * the iclog will get zeroed on activation of the iclog after sync, so we
 565 * always capture the tail lsn on the iclog on the first NEED_FUA release
 566 * regardless of the number of active reference counts on this iclog.
 567 */
 568int
 569xlog_state_release_iclog(
 570	struct xlog		*log,
 571	struct xlog_in_core	*iclog,
 572	struct xlog_ticket	*ticket)
 573{
 574	xfs_lsn_t		tail_lsn;
 575	bool			last_ref;
 576
 577	lockdep_assert_held(&log->l_icloglock);
 578
 579	trace_xlog_iclog_release(iclog, _RET_IP_);
 580	/*
 581	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
 582	 * of the tail LSN into the iclog so we guarantee that the log tail does
 583	 * not move between the first time we know that the iclog needs to be
 584	 * made stable and when we eventually submit it.
 585	 */
 586	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
 587	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
 588	    !iclog->ic_header.h_tail_lsn) {
 589		tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 590		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
 591	}
 592
 593	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
 594
 595	if (xlog_is_shutdown(log)) {
 596		/*
 597		 * If there are no more references to this iclog, process the
 598		 * pending iclog callbacks that were waiting on the release of
 599		 * this iclog.
 600		 */
 601		if (last_ref)
 602			xlog_state_shutdown_callbacks(log);
 603		return -EIO;
 604	}
 605
 606	if (!last_ref)
 607		return 0;
 608
 609	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
 610		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
 611		return 0;
 612	}
 613
 614	iclog->ic_state = XLOG_STATE_SYNCING;
 615	xlog_verify_tail_lsn(log, iclog);
 616	trace_xlog_iclog_syncing(iclog, _RET_IP_);
 617
 618	spin_unlock(&log->l_icloglock);
 619	xlog_sync(log, iclog, ticket);
 620	spin_lock(&log->l_icloglock);
 621	return 0;
 622}
 623
 624/*
 625 * Mount a log filesystem
 626 *
 627 * mp		- ubiquitous xfs mount point structure
 628 * log_target	- buftarg of on-disk log device
 629 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 630 * num_bblks	- Number of BBSIZE blocks in on-disk log
 631 *
 632 * Return error or zero.
 633 */
 634int
 635xfs_log_mount(
 636	xfs_mount_t	*mp,
 637	xfs_buftarg_t	*log_target,
 638	xfs_daddr_t	blk_offset,
 639	int		num_bblks)
 640{
 641	struct xlog	*log;
 642	bool		fatal = xfs_has_crc(mp);
 643	int		error = 0;
 644	int		min_logfsbs;
 645
 646	if (!xfs_has_norecovery(mp)) {
 647		xfs_notice(mp, "Mounting V%d Filesystem %pU",
 648			   XFS_SB_VERSION_NUM(&mp->m_sb),
 649			   &mp->m_sb.sb_uuid);
 650	} else {
 651		xfs_notice(mp,
 652"Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
 653			   XFS_SB_VERSION_NUM(&mp->m_sb),
 654			   &mp->m_sb.sb_uuid);
 655		ASSERT(xfs_is_readonly(mp));
 656	}
 657
 658	log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
 659	if (IS_ERR(log)) {
 660		error = PTR_ERR(log);
 661		goto out;
 662	}
 663	mp->m_log = log;
 664
 665	/*
 666	 * Validate the given log space and drop a critical message via syslog
 667 * if the log size is so small that it would lead to some unexpected
 668 * situations in the transaction log space reservation stage.
 669	 *
 670	 * Note: we can't just reject the mount if the validation fails.  This
 671	 * would mean that people would have to downgrade their kernel just to
 672	 * remedy the situation as there is no way to grow the log (short of
 673	 * black magic surgery with xfs_db).
 674	 *
 675	 * We can, however, reject mounts for CRC format filesystems, as the
 676	 * mkfs binary being used to make the filesystem should never create a
 677	 * filesystem with a log that is too small.
 678	 */
 679	min_logfsbs = xfs_log_calc_minimum_size(mp);
 680
 681	if (mp->m_sb.sb_logblocks < min_logfsbs) {
 682		xfs_warn(mp,
 683		"Log size %d blocks too small, minimum size is %d blocks",
 684			 mp->m_sb.sb_logblocks, min_logfsbs);
 685		error = -EINVAL;
 686	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
 687		xfs_warn(mp,
 688		"Log size %d blocks too large, maximum size is %lld blocks",
 689			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
 690		error = -EINVAL;
 691	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
 692		xfs_warn(mp,
 693		"log size %lld bytes too large, maximum size is %lld bytes",
 694			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
 695			 XFS_MAX_LOG_BYTES);
 696		error = -EINVAL;
 697	} else if (mp->m_sb.sb_logsunit > 1 &&
 698		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
 699		xfs_warn(mp,
 700		"log stripe unit %u bytes must be a multiple of block size",
 701			 mp->m_sb.sb_logsunit);
 702		error = -EINVAL;
 703		fatal = true;
 704	}
 705	if (error) {
 706		/*
 707		 * Log check errors are always fatal on v5; or whenever bad
 708		 * metadata leads to a crash.
 709		 */
 710		if (fatal) {
 711			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
 712			ASSERT(0);
 713			goto out_free_log;
 714		}
 715		xfs_crit(mp, "Log size out of supported range.");
 716		xfs_crit(mp,
 717"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
 718	}
 719
 720	/*
 721	 * Initialize the AIL now we have a log.
 722	 */
 723	error = xfs_trans_ail_init(mp);
 724	if (error) {
 725		xfs_warn(mp, "AIL initialisation failed: error %d", error);
 726		goto out_free_log;
 727	}
 728	log->l_ailp = mp->m_ail;
 729
 730	/*
 731	 * skip log recovery on a norecovery mount.  pretend it all
 732	 * just worked.
 733	 */
 734	if (!xfs_has_norecovery(mp)) {
 735		/*
 736		 * log recovery ignores readonly state and so we need to clear
 737		 * mount-based read only state so it can write to disk.
 738		 */
 739		bool	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
 740						&mp->m_opstate);
 741		error = xlog_recover(log);
 742		if (readonly)
 743			set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
 744		if (error) {
 745			xfs_warn(mp, "log mount/recovery failed: error %d",
 746				error);
 747			xlog_recover_cancel(log);
 748			goto out_destroy_ail;
 749		}
 750	}
 751
 752	error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
 753			       "log");
 754	if (error)
 755		goto out_destroy_ail;
 756
 757	/* Normal transactions can now occur */
 758	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
 759
 760	/*
 761 * Now the log has been fully initialised and we know where our
 762	 * space grant counters are, we can initialise the permanent ticket
 763	 * needed for delayed logging to work.
 764	 */
 765	xlog_cil_init_post_recovery(log);
 766
 767	return 0;
 768
 769out_destroy_ail:
 770	xfs_trans_ail_destroy(mp);
 771out_free_log:
 772	xlog_dealloc_log(log);
 773out:
 774	return error;
 775}
 776
 777/*
 778 * Finish the recovery of the file system.  This is separate from the
 779 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 780 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 781 * here.
 782 *
 783 * If we finish recovery successfully, start the background log work. If we are
 784 * not doing recovery, then we have a RO filesystem and we don't need to start
 785 * it.
 786 */
 787int
 788xfs_log_mount_finish(
 789	struct xfs_mount	*mp)
 790{
 791	struct xlog		*log = mp->m_log;
 792	bool			readonly;
 793	int			error = 0;
 794
 795	if (xfs_has_norecovery(mp)) {
 796		ASSERT(xfs_is_readonly(mp));
 797		return 0;
 798	}
 799
 800	/*
 801	 * log recovery ignores readonly state and so we need to clear
 802	 * mount-based read only state so it can write to disk.
 803	 */
 804	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
 805
 806	/*
 807	 * During the second phase of log recovery, we need iget and
 808	 * iput to behave like they do for an active filesystem.
 809	 * xfs_fs_drop_inode needs to be able to prevent the deletion
 810	 * of inodes before we're done replaying log items on those
 811	 * inodes.  Turn it off immediately after recovery finishes
 812	 * so that we don't leak the quota inodes if subsequent mount
 813	 * activities fail.
 814	 *
 815	 * We let all inodes involved in redo item processing end up on
 816	 * the LRU instead of being evicted immediately so that if we do
 817	 * something to an unlinked inode, the irele won't cause
 818	 * premature truncation and freeing of the inode, which results
 819	 * in log recovery failure.  We have to evict the unreferenced
 820	 * lru inodes after clearing SB_ACTIVE because we don't
 821	 * otherwise clean up the lru if there's a subsequent failure in
 822	 * xfs_mountfs, which leads to us leaking the inodes if nothing
 823	 * else (e.g. quotacheck) references the inodes before the
 824	 * mount failure occurs.
 825	 */
 826	mp->m_super->s_flags |= SB_ACTIVE;
 827	xfs_log_work_queue(mp);
 828	if (xlog_recovery_needed(log))
 829		error = xlog_recover_finish(log);
 830	mp->m_super->s_flags &= ~SB_ACTIVE;
 831	evict_inodes(mp->m_super);
 832
 833	/*
 834	 * Drain the buffer LRU after log recovery. This is required for v4
 835	 * filesystems to avoid leaving around buffers with NULL verifier ops,
 836	 * but we do it unconditionally to make sure we're always in a clean
 837	 * cache state after mount.
 838	 *
 839	 * Don't push in the error case because the AIL may have pending intents
 840	 * that aren't removed until recovery is cancelled.
 841	 */
 842	if (xlog_recovery_needed(log)) {
 843		if (!error) {
 844			xfs_log_force(mp, XFS_LOG_SYNC);
 845			xfs_ail_push_all_sync(mp->m_ail);
 846		}
 847		xfs_notice(mp, "Ending recovery (logdev: %s)",
 848				mp->m_logname ? mp->m_logname : "internal");
 849	} else {
 850		xfs_info(mp, "Ending clean mount");
 851	}
 852	xfs_buftarg_drain(mp->m_ddev_targp);
 853
 854	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
 855	if (readonly)
 856		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
 857
 858	/* Make sure the log is dead if we're returning failure. */
 859	ASSERT(!error || xlog_is_shutdown(log));
 860
 861	return error;
 862}
 863
 864/*
 865 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 866 * the log.
 867 */
 868void
 869xfs_log_mount_cancel(
 870	struct xfs_mount	*mp)
 871{
 872	xlog_recover_cancel(mp->m_log);
 873	xfs_log_unmount(mp);
 874}
 875
 876/*
 877 * Flush out the iclog to disk ensuring that device caches are flushed and
 878 * the iclog hits stable storage before any completion waiters are woken.
 879 */
 880static inline int
 881xlog_force_iclog(
 882	struct xlog_in_core	*iclog)
 883{
 884	atomic_inc(&iclog->ic_refcnt);
 885	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
 886	if (iclog->ic_state == XLOG_STATE_ACTIVE)
 887		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
 888	return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
 889}
 890
 891/*
 892 * Cycle all the iclogbuf locks to make sure all log IO completion
 893 * is done before we tear down these buffers.
 894 */
 895static void
 896xlog_wait_iclog_completion(struct xlog *log)
 897{
 898	int		i;
 899	struct xlog_in_core	*iclog = log->l_iclog;
 900
 901	for (i = 0; i < log->l_iclog_bufs; i++) {
 902		down(&iclog->ic_sema);
 903		up(&iclog->ic_sema);
 904		iclog = iclog->ic_next;
 905	}
 906}
 907
 908/*
 909 * Wait for the iclog and all prior iclogs to be written to disk as required by the
 910 * log force state machine. Waiting on ic_force_wait ensures iclog completions
 911 * have been ordered and callbacks run before we are woken here, hence
 912 * guaranteeing that all the iclogs up to this one are on stable storage.
 913 */
 914int
 915xlog_wait_on_iclog(
 916	struct xlog_in_core	*iclog)
 917		__releases(iclog->ic_log->l_icloglock)
 918{
 919	struct xlog		*log = iclog->ic_log;
 920
 921	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
 922	if (!xlog_is_shutdown(log) &&
 923	    iclog->ic_state != XLOG_STATE_ACTIVE &&
 924	    iclog->ic_state != XLOG_STATE_DIRTY) {
 925		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
 926		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 927	} else {
 928		spin_unlock(&log->l_icloglock);
 929	}
 930
 931	if (xlog_is_shutdown(log))
 932		return -EIO;
 933	return 0;
 934}
 935
 936/*
 937 * Write out an unmount record using the ticket provided. We have to account for
 938 * the data space used in the unmount ticket as this write is not done from a
 939 * transaction context that has already done the accounting for us.
 940 */
 941static int
 942xlog_write_unmount_record(
 943	struct xlog		*log,
 944	struct xlog_ticket	*ticket)
 945{
 946	struct  {
 947		struct xlog_op_header ophdr;
 948		struct xfs_unmount_log_format ulf;
 949	} unmount_rec = {
 950		.ophdr = {
 951			.oh_clientid = XFS_LOG,
 952			.oh_tid = cpu_to_be32(ticket->t_tid),
 953			.oh_flags = XLOG_UNMOUNT_TRANS,
 954		},
 955		.ulf = {
 956			.magic = XLOG_UNMOUNT_TYPE,
 957		},
 958	};
 959	struct xfs_log_iovec reg = {
 960		.i_addr = &unmount_rec,
 961		.i_len = sizeof(unmount_rec),
 962		.i_type = XLOG_REG_TYPE_UNMOUNT,
 963	};
 964	struct xfs_log_vec vec = {
 965		.lv_niovecs = 1,
 966		.lv_iovecp = &reg,
 967	};
 968	LIST_HEAD(lv_chain);
 969	list_add(&vec.lv_list, &lv_chain);
 970
 971	BUILD_BUG_ON((sizeof(struct xlog_op_header) +
 972		      sizeof(struct xfs_unmount_log_format)) !=
 973							sizeof(unmount_rec));
 974
 975	/* account for space used by record data */
 976	ticket->t_curr_res -= sizeof(unmount_rec);
 977
 978	return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
 979}
 980
 981/*
 982 * Mark the filesystem clean by writing an unmount record to the head of the
 983 * log.
 984 */
 985static void
 986xlog_unmount_write(
 987	struct xlog		*log)
 988{
 989	struct xfs_mount	*mp = log->l_mp;
 990	struct xlog_in_core	*iclog;
 991	struct xlog_ticket	*tic = NULL;
 992	int			error;
 993
 994	error = xfs_log_reserve(mp, 600, 1, &tic, 0);
 995	if (error)
 996		goto out_err;
 997
 998	error = xlog_write_unmount_record(log, tic);
 999	/*
1000	 * At this point, we're umounting anyway, so there's no point in
1001	 * transitioning log state to shutdown. Just continue...
1002	 */
1003out_err:
1004	if (error)
1005		xfs_alert(mp, "%s: unmount record failed", __func__);
1006
1007	spin_lock(&log->l_icloglock);
1008	iclog = log->l_iclog;
1009	error = xlog_force_iclog(iclog);
1010	xlog_wait_on_iclog(iclog);
1011
1012	if (tic) {
1013		trace_xfs_log_umount_write(log, tic);
1014		xfs_log_ticket_ungrant(log, tic);
1015	}
1016}
1017
1018static void
1019xfs_log_unmount_verify_iclog(
1020	struct xlog		*log)
1021{
1022	struct xlog_in_core	*iclog = log->l_iclog;
1023
1024	do {
1025		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
1026		ASSERT(iclog->ic_offset == 0);
1027	} while ((iclog = iclog->ic_next) != log->l_iclog);
1028}
1029
1030/*
1031 * Unmount record used to have a string "Unmount filesystem--" in the
1032 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
1033 * We just write the magic number now since that particular field isn't
1034 * currently architecture converted and "Unmount" is a bit foo.
1035 * As far as I know, there weren't any dependencies on the old behaviour.
1036 */
1037static void
1038xfs_log_unmount_write(
1039	struct xfs_mount	*mp)
1040{
1041	struct xlog		*log = mp->m_log;
1042
1043	if (!xfs_log_writable(mp))
1044		return;
1045
1046	xfs_log_force(mp, XFS_LOG_SYNC);
1047
1048	if (xlog_is_shutdown(log))
1049		return;
1050
1051	/*
1052	 * If we think the summary counters are bad, avoid writing the unmount
1053	 * record to force log recovery at next mount, after which the summary
1054	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
1055	 * more details.
1056	 */
1057	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
1058			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
1059		xfs_alert(mp, "%s: will fix summary counters at next mount",
1060				__func__);
1061		return;
1062	}
1063
1064	xfs_log_unmount_verify_iclog(log);
1065	xlog_unmount_write(log);
1066}
1067
1068/*
1069 * Empty the log for unmount/freeze.
1070 *
1071 * To do this, we first need to shut down the background log work so it is not
1072 * trying to cover the log as we clean up. We then need to unpin all objects in
1073 * the log so we can then flush them out. Once they have completed their IO and
1074 * run the callbacks removing themselves from the AIL, we can cover the log.
1075 */
1076int
1077xfs_log_quiesce(
1078	struct xfs_mount	*mp)
1079{
1080	/*
1081	 * Clear log incompat features since we're quiescing the log.  Report
1082	 * failures, though it's not fatal to have a higher log feature
1083	 * protection level than the log contents actually require.
1084	 */
1085	if (xfs_clear_incompat_log_features(mp)) {
1086		int error;
1087
1088		error = xfs_sync_sb(mp, false);
1089		if (error)
1090			xfs_warn(mp,
1091	"Failed to clear log incompat features on quiesce");
1092	}
1093
1094	cancel_delayed_work_sync(&mp->m_log->l_work);
1095	xfs_log_force(mp, XFS_LOG_SYNC);
1096
1097	/*
1098	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1099	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
1100	 * xfs_buf_iowait() cannot be used because it was pushed with the
1101	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1102	 * the IO to complete.
1103	 */
1104	xfs_ail_push_all_sync(mp->m_ail);
1105	xfs_buftarg_wait(mp->m_ddev_targp);
1106	xfs_buf_lock(mp->m_sb_bp);
1107	xfs_buf_unlock(mp->m_sb_bp);
1108
1109	return xfs_log_cover(mp);
1110}
1111
1112void
1113xfs_log_clean(
1114	struct xfs_mount	*mp)
1115{
1116	xfs_log_quiesce(mp);
1117	xfs_log_unmount_write(mp);
1118}
1119
1120/*
1121 * Shut down and release the AIL and Log.
1122 *
1123 * During unmount, we need to ensure we flush all the dirty metadata objects
1124 * from the AIL so that the log is empty before we write the unmount record to
1125 * the log. Once this is done, we can tear down the AIL and the log.
1126 */
1127void
1128xfs_log_unmount(
1129	struct xfs_mount	*mp)
1130{
1131	xfs_log_clean(mp);
1132
1133	/*
1134	 * If shutdown has come from iclog IO context, the log
1135	 * cleaning will have been skipped and so we need to wait
1136	 * for the iclog to complete shutdown processing before we
1137	 * tear anything down.
1138	 */
1139	xlog_wait_iclog_completion(mp->m_log);
1140
1141	xfs_buftarg_drain(mp->m_ddev_targp);
1142
1143	xfs_trans_ail_destroy(mp);
1144
1145	xfs_sysfs_del(&mp->m_log->l_kobj);
1146
1147	xlog_dealloc_log(mp->m_log);
1148}
1149
1150void
1151xfs_log_item_init(
1152	struct xfs_mount	*mp,
1153	struct xfs_log_item	*item,
1154	int			type,
1155	const struct xfs_item_ops *ops)
1156{
1157	item->li_log = mp->m_log;
1158	item->li_ailp = mp->m_ail;
1159	item->li_type = type;
1160	item->li_ops = ops;
1161	item->li_lv = NULL;
1162
1163	INIT_LIST_HEAD(&item->li_ail);
1164	INIT_LIST_HEAD(&item->li_cil);
1165	INIT_LIST_HEAD(&item->li_bio_list);
1166	INIT_LIST_HEAD(&item->li_trans);
1167}
1168
1169/*
1170 * Wake up processes waiting for log space after we have moved the log tail.
1171 */
1172void
1173xfs_log_space_wake(
1174	struct xfs_mount	*mp)
1175{
1176	struct xlog		*log = mp->m_log;
1177	int			free_bytes;
1178
1179	if (xlog_is_shutdown(log))
1180		return;
1181
1182	if (!list_empty_careful(&log->l_write_head.waiters)) {
1183		ASSERT(!xlog_in_recovery(log));
1184
1185		spin_lock(&log->l_write_head.lock);
1186		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1187		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1188		spin_unlock(&log->l_write_head.lock);
1189	}
1190
1191	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1192		ASSERT(!xlog_in_recovery(log));
1193
1194		spin_lock(&log->l_reserve_head.lock);
1195		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1196		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1197		spin_unlock(&log->l_reserve_head.lock);
1198	}
1199}
1200
1201/*
1202 * Determine if we have a transaction that has gone to disk that needs to be
1203 * covered. To begin the transition to the idle state firstly the log needs to
1204 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
1205 * we start attempting to cover the log.
1206 *
1207 * Only if we are then in a state where covering is needed, the caller is
1208 * informed that dummy transactions are required to move the log into the idle
1209 * state.
1210 *
1211 * If there are any items in the AIL or CIL, then we do not want to attempt to
1212 * cover the log as we may be in a situation where there isn't log space
1213 * available to run a dummy transaction and this can lead to deadlocks when the
1214 * tail of the log is pinned by an item that is modified in the CIL.  Hence
1215 * there's no point in running a dummy transaction at this point because we
1216 * can't start trying to idle the log until both the CIL and AIL are empty.
1217 */
1218static bool
1219xfs_log_need_covered(
1220	struct xfs_mount	*mp)
1221{
1222	struct xlog		*log = mp->m_log;
1223	bool			needed = false;
1224
1225	if (!xlog_cil_empty(log))
1226		return false;
1227
1228	spin_lock(&log->l_icloglock);
1229	switch (log->l_covered_state) {
1230	case XLOG_STATE_COVER_DONE:
1231	case XLOG_STATE_COVER_DONE2:
1232	case XLOG_STATE_COVER_IDLE:
1233		break;
1234	case XLOG_STATE_COVER_NEED:
1235	case XLOG_STATE_COVER_NEED2:
1236		if (xfs_ail_min_lsn(log->l_ailp))
1237			break;
1238		if (!xlog_iclogs_empty(log))
1239			break;
1240
1241		needed = true;
1242		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1243			log->l_covered_state = XLOG_STATE_COVER_DONE;
1244		else
1245			log->l_covered_state = XLOG_STATE_COVER_DONE2;
1246		break;
1247	default:
1248		needed = true;
1249		break;
1250	}
1251	spin_unlock(&log->l_icloglock);
1252	return needed;
1253}
1254
1255/*
1256 * Explicitly cover the log. This is similar to background log covering but
1257 * intended for usage in quiesce codepaths. The caller is responsible to ensure
1258 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1259 * must all be empty.
1260 */
1261static int
1262xfs_log_cover(
1263	struct xfs_mount	*mp)
1264{
1265	int			error = 0;
1266	bool			need_covered;
1267
1268	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1269	        !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1270		xlog_is_shutdown(mp->m_log));
1271
1272	if (!xfs_log_writable(mp))
1273		return 0;
1274
1275	/*
1276	 * xfs_log_need_covered() is not idempotent because it progresses the
1277	 * state machine if the log requires covering. Therefore, we must call
1278	 * this function once and use the result until we've issued an sb sync.
1279	 * Do so first to make that abundantly clear.
1280	 *
1281	 * Fall into the covering sequence if the log needs covering or the
1282	 * mount has lazy superblock accounting to sync to disk. The sb sync
1283	 * used for covering accumulates the in-core counters, so covering
1284	 * handles this for us.
1285	 */
1286	need_covered = xfs_log_need_covered(mp);
1287	if (!need_covered && !xfs_has_lazysbcount(mp))
1288		return 0;
1289
1290	/*
1291	 * To cover the log, commit the superblock twice (at most) in
1292	 * independent checkpoints. The first serves as a reference for the
1293	 * tail pointer. The sync transaction and AIL push empties the AIL and
1294	 * updates the in-core tail to the LSN of the first checkpoint. The
1295	 * second commit updates the on-disk tail with the in-core LSN,
1296	 * covering the log. Push the AIL one more time to leave it empty, as
1297	 * we found it.
1298	 */
1299	do {
1300		error = xfs_sync_sb(mp, true);
1301		if (error)
1302			break;
1303		xfs_ail_push_all_sync(mp->m_ail);
1304	} while (xfs_log_need_covered(mp));
1305
1306	return error;
1307}
1308
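/*
 * The covering loop above normally takes two passes through the covered
 * state machine. A sketch of the common sequence, assuming nothing dirties
 * the log in between (the DONE -> NEED2 and DONE2 -> IDLE steps happen as
 * the dummy record's iclogs go clean):
 *
 *	COVER_NEED -> COVER_DONE -> COVER_NEED2 -> COVER_DONE2 -> COVER_IDLE
 *
 * so it is the second superblock commit that lands the up to date tail LSN
 * on disk and lets the log go idle.
 */
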
1309/*
1310 * We may be holding the log iclog lock upon entering this routine.
1311 */
1312xfs_lsn_t
1313xlog_assign_tail_lsn_locked(
1314	struct xfs_mount	*mp)
1315{
1316	struct xlog		*log = mp->m_log;
1317	struct xfs_log_item	*lip;
1318	xfs_lsn_t		tail_lsn;
1319
1320	assert_spin_locked(&mp->m_ail->ail_lock);
1321
1322	/*
1323	 * To make sure we always have a valid LSN for the log tail we keep
1324	 * track of the last LSN which was committed in log->l_last_sync_lsn,
1325	 * and use that when the AIL was empty.
1326	 */
1327	lip = xfs_ail_min(mp->m_ail);
1328	if (lip)
1329		tail_lsn = lip->li_lsn;
1330	else
1331		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1332	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1333	atomic64_set(&log->l_tail_lsn, tail_lsn);
1334	return tail_lsn;
1335}
1336
1337xfs_lsn_t
1338xlog_assign_tail_lsn(
1339	struct xfs_mount	*mp)
1340{
1341	xfs_lsn_t		tail_lsn;
1342
1343	spin_lock(&mp->m_ail->ail_lock);
1344	tail_lsn = xlog_assign_tail_lsn_locked(mp);
1345	spin_unlock(&mp->m_ail->ail_lock);
1346
1347	return tail_lsn;
1348}
1349
1350/*
1351 * Return the space in the log between the tail and the head.  The head
1352 * is passed in the cycle/bytes formal parms.  In the special case where
1353 * the reserve head has wrapped past the tail, this calculation is no
1354 * longer valid.  In this case, just return 0 which means there is no space
1355 * in the log.  This works for all places where this function is called
1356 * with the reserve head.  Of course, if the write head were to ever
1357 * wrap the tail, we should blow up.  Rather than catch this case here,
1358 * we depend on other ASSERTions in other parts of the code.   XXXmiken
1359 *
1360 * If reservation head is behind the tail, we have a problem. Warn about it,
1361 * but then treat it as if the log is empty.
1362 *
1363 * If the log is shut down, the head and tail may be invalid or out of whack, so
1364 * shortcut invalidity asserts in this case so that we don't trigger them
1365 * falsely.
1366 */
1367STATIC int
1368xlog_space_left(
1369	struct xlog	*log,
1370	atomic64_t	*head)
1371{
1372	int		tail_bytes;
1373	int		tail_cycle;
1374	int		head_cycle;
1375	int		head_bytes;
1376
1377	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1378	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1379	tail_bytes = BBTOB(tail_bytes);
1380	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1381		return log->l_logsize - (head_bytes - tail_bytes);
1382	if (tail_cycle + 1 < head_cycle)
1383		return 0;
1384
1385	/* Ignore potential inconsistency when shutdown. */
1386	if (xlog_is_shutdown(log))
1387		return log->l_logsize;
1388
1389	if (tail_cycle < head_cycle) {
1390		ASSERT(tail_cycle == (head_cycle - 1));
1391		return tail_bytes - head_bytes;
1392	}
1393
1394	/*
1395	 * The reservation head is behind the tail. In this case we just want to
1396	 * return the size of the log as the amount of space left.
1397	 */
1398	xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1399	xfs_alert(log->l_mp, "  tail_cycle = %d, tail_bytes = %d",
1400		  tail_cycle, tail_bytes);
1401	xfs_alert(log->l_mp, "  GH   cycle = %d, GH   bytes = %d",
1402		  head_cycle, head_bytes);
1403	ASSERT(0);
1404	return log->l_logsize;
1405}
1406
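/*
 * Worked examples for xlog_space_left() with assumed numbers on a 16 MB
 * log: tail at cycle 5, byte 9437184 (9 MB) and head at cycle 5, byte
 * 13631488 (13 MB) gives 16 MB - (13 MB - 9 MB) = 12 MB free. Once the
 * head wraps to cycle 6, byte 1048576 with the tail unchanged, free space
 * is 9 MB - 1 MB = 8 MB. A head more than one full cycle ahead of the
 * tail means the log is full and 0 is returned.
 */
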
1407
1408static void
1409xlog_ioend_work(
1410	struct work_struct	*work)
1411{
1412	struct xlog_in_core     *iclog =
1413		container_of(work, struct xlog_in_core, ic_end_io_work);
1414	struct xlog		*log = iclog->ic_log;
1415	int			error;
1416
1417	error = blk_status_to_errno(iclog->ic_bio.bi_status);
1418#ifdef DEBUG
1419	/* treat writes with injected CRC errors as failed */
1420	if (iclog->ic_fail_crc)
1421		error = -EIO;
1422#endif
1423
1424	/*
1425	 * Race to shutdown the filesystem if we see an error.
1426	 */
1427	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1428		xfs_alert(log->l_mp, "log I/O error %d", error);
1429		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1430	}
1431
1432	xlog_state_done_syncing(iclog);
1433	bio_uninit(&iclog->ic_bio);
1434
1435	/*
1436	 * Drop the lock to signal that we are done. Nothing references the
1437	 * iclog after this, so an unmount waiting on this lock can now tear it
1438	 * down safely. As such, it is unsafe to reference the iclog after the
1439	 * unlock as we could race with it being freed.
1440	 */
1441	up(&iclog->ic_sema);
1442}
1443
1444/*
1445 * Return size of each in-core log record buffer.
1446 *
1447 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1448 *
1449 * If the filesystem blocksize is too large, we may need to choose a
1450 * larger size since the directory code currently logs entire blocks.
1451 */
1452STATIC void
1453xlog_get_iclog_buffer_size(
1454	struct xfs_mount	*mp,
1455	struct xlog		*log)
1456{
1457	if (mp->m_logbufs <= 0)
1458		mp->m_logbufs = XLOG_MAX_ICLOGS;
1459	if (mp->m_logbsize <= 0)
1460		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1461
1462	log->l_iclog_bufs = mp->m_logbufs;
1463	log->l_iclog_size = mp->m_logbsize;
1464
1465	/*
1466	 * # headers = size / 32k - one header holds cycles from 32k of data.
1467	 */
1468	log->l_iclog_heads =
1469		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1470	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1471}
1472
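/*
 * Example of the sizing above: the defaults give 8 iclogs of 32 kB, so
 * each iclog needs a single header (l_iclog_heads = 1) and
 * l_iclog_hsize = 1 << BBSHIFT = 512 bytes. A logbsize=256k mount needs
 * DIV_ROUND_UP(262144, 32768) = 8 headers, i.e. 4096 bytes of header
 * space per 256 kB iclog.
 */
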
1473void
1474xfs_log_work_queue(
1475	struct xfs_mount        *mp)
1476{
1477	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1478				msecs_to_jiffies(xfs_syncd_centisecs * 10));
1479}
1480
1481/*
1482 * Clear the log incompat flags if we have the opportunity.
1483 *
1484 * This only happens if we're about to log the second dummy transaction as part
1485 * of covering the log and we can get the log incompat feature usage lock.
1486 */
1487static inline void
1488xlog_clear_incompat(
1489	struct xlog		*log)
1490{
1491	struct xfs_mount	*mp = log->l_mp;
1492
1493	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1494				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1495		return;
1496
1497	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1498		return;
1499
1500	if (!down_write_trylock(&log->l_incompat_users))
1501		return;
1502
1503	xfs_clear_incompat_log_features(mp);
1504	up_write(&log->l_incompat_users);
1505}
1506
1507/*
1508 * Every sync period we need to unpin all items in the AIL and push them to
1509 * disk. If there is nothing dirty, then we might need to cover the log to
1510 * indicate that the filesystem is idle.
1511 */
1512static void
1513xfs_log_worker(
1514	struct work_struct	*work)
1515{
1516	struct xlog		*log = container_of(to_delayed_work(work),
1517						struct xlog, l_work);
1518	struct xfs_mount	*mp = log->l_mp;
1519
1520	/* dgc: errors ignored - not fatal and nowhere to report them */
1521	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1522		/*
1523		 * Dump a transaction into the log that contains no real change.
1524		 * This is needed to stamp the current tail LSN into the log
1525		 * during the covering operation.
1526		 *
1527		 * We cannot use an inode here for this - that will push dirty
1528		 * state back up into the VFS and then periodic inode flushing
1529		 * will prevent log covering from making progress. Hence we
1530		 * synchronously log the superblock instead to ensure the
1531		 * superblock is immediately unpinned and can be written back.
1532		 */
1533		xlog_clear_incompat(log);
1534		xfs_sync_sb(mp, true);
1535	} else
1536		xfs_log_force(mp, 0);
1537
1538	/* start pushing all the metadata that is currently dirty */
1539	xfs_ail_push_all(mp->m_ail);
1540
1541	/* queue us up again */
1542	xfs_log_work_queue(mp);
1543}
1544
1545/*
1546 * This routine initializes some of the log structure for a given mount point.
1547 * Its primary purpose is to fill in enough, so recovery can occur.  However,
1548 * some other stuff may be filled in too.
1549 */
1550STATIC struct xlog *
1551xlog_alloc_log(
1552	struct xfs_mount	*mp,
1553	struct xfs_buftarg	*log_target,
1554	xfs_daddr_t		blk_offset,
1555	int			num_bblks)
1556{
1557	struct xlog		*log;
1558	xlog_rec_header_t	*head;
1559	xlog_in_core_t		**iclogp;
1560	xlog_in_core_t		*iclog, *prev_iclog=NULL;
1561	int			i;
1562	int			error = -ENOMEM;
1563	uint			log2_size = 0;
1564
1565	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1566	if (!log) {
1567		xfs_warn(mp, "Log allocation failed: No memory!");
1568		goto out;
1569	}
1570
1571	log->l_mp	   = mp;
1572	log->l_targ	   = log_target;
1573	log->l_logsize     = BBTOB(num_bblks);
1574	log->l_logBBstart  = blk_offset;
1575	log->l_logBBsize   = num_bblks;
1576	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1577	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1578	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1579
1580	log->l_prev_block  = -1;
1581	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1582	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1583	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1584	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1585
1586	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1587		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1588	else
1589		log->l_iclog_roundoff = BBSIZE;
1590
1591	xlog_grant_head_init(&log->l_reserve_head);
1592	xlog_grant_head_init(&log->l_write_head);
1593
1594	error = -EFSCORRUPTED;
1595	if (xfs_has_sector(mp)) {
1596		log2_size = mp->m_sb.sb_logsectlog;
1597		if (log2_size < BBSHIFT) {
1598			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1599				log2_size, BBSHIFT);
1600			goto out_free_log;
1601		}
1602
1603		log2_size -= BBSHIFT;
1604		if (log2_size > mp->m_sectbb_log) {
1605			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1606				log2_size, mp->m_sectbb_log);
1607			goto out_free_log;
1608		}
1609
1610		/* for larger sector sizes, must have v2 or external log */
1611		if (log2_size && log->l_logBBstart > 0 &&
1612			    !xfs_has_logv2(mp)) {
1613			xfs_warn(mp,
1614		"log sector size (0x%x) invalid for configuration.",
1615				log2_size);
1616			goto out_free_log;
1617		}
1618	}
1619	log->l_sectBBsize = 1 << log2_size;
1620
1621	init_rwsem(&log->l_incompat_users);
1622
1623	xlog_get_iclog_buffer_size(mp, log);
1624
1625	spin_lock_init(&log->l_icloglock);
1626	init_waitqueue_head(&log->l_flush_wait);
1627
1628	iclogp = &log->l_iclog;
1629	/*
1630	 * The amount of memory to allocate for the iclog structure is
1631	 * rather funky due to the way the structure is defined.  It is
1632	 * done this way so that we can use different sizes for machines
1633	 * with different amounts of memory.  See the definition of
1634	 * xlog_in_core_t in xfs_log_priv.h for details.
1635	 */
1636	ASSERT(log->l_iclog_size >= 4096);
1637	for (i = 0; i < log->l_iclog_bufs; i++) {
1638		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1639				sizeof(struct bio_vec);
1640
1641		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1642		if (!iclog)
1643			goto out_free_iclog;
1644
1645		*iclogp = iclog;
1646		iclog->ic_prev = prev_iclog;
1647		prev_iclog = iclog;
1648
1649		iclog->ic_data = kvzalloc(log->l_iclog_size,
1650				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1651		if (!iclog->ic_data)
1652			goto out_free_iclog;
1653		head = &iclog->ic_header;
1654		memset(head, 0, sizeof(xlog_rec_header_t));
1655		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1656		head->h_version = cpu_to_be32(
1657			xfs_has_logv2(log->l_mp) ? 2 : 1);
1658		head->h_size = cpu_to_be32(log->l_iclog_size);
1659		/* new fields */
1660		head->h_fmt = cpu_to_be32(XLOG_FMT);
1661		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1662
1663		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1664		iclog->ic_state = XLOG_STATE_ACTIVE;
1665		iclog->ic_log = log;
1666		atomic_set(&iclog->ic_refcnt, 0);
1667		INIT_LIST_HEAD(&iclog->ic_callbacks);
1668		iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1669
1670		init_waitqueue_head(&iclog->ic_force_wait);
1671		init_waitqueue_head(&iclog->ic_write_wait);
1672		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1673		sema_init(&iclog->ic_sema, 1);
1674
1675		iclogp = &iclog->ic_next;
1676	}
1677	*iclogp = log->l_iclog;			/* complete ring */
1678	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1679
1680	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1681			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1682				    WQ_HIGHPRI),
1683			0, mp->m_super->s_id);
1684	if (!log->l_ioend_workqueue)
1685		goto out_free_iclog;
1686
1687	error = xlog_cil_init(log);
1688	if (error)
1689		goto out_destroy_workqueue;
1690	return log;
1691
1692out_destroy_workqueue:
1693	destroy_workqueue(log->l_ioend_workqueue);
1694out_free_iclog:
1695	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1696		prev_iclog = iclog->ic_next;
1697		kmem_free(iclog->ic_data);
1698		kmem_free(iclog);
1699		if (prev_iclog == log->l_iclog)
1700			break;
1701	}
1702out_free_log:
1703	kmem_free(log);
1704out:
1705	return ERR_PTR(error);
1706}	/* xlog_alloc_log */
1707
1708/*
1709 * Compute the LSN that we'd need to push the log tail towards in order to have
1710 * (a) enough on-disk log space to log the number of bytes specified, (b) at
1711 * least 25% of the log space free, and (c) at least 256 blocks free.  If the
1712 * log free space already meets all three thresholds, this function returns
1713 * NULLCOMMITLSN.
1714 */
1715xfs_lsn_t
1716xlog_grant_push_threshold(
1717	struct xlog	*log,
1718	int		need_bytes)
1719{
1720	xfs_lsn_t	threshold_lsn = 0;
1721	xfs_lsn_t	last_sync_lsn;
1722	int		free_blocks;
1723	int		free_bytes;
1724	int		threshold_block;
1725	int		threshold_cycle;
1726	int		free_threshold;
1727
1728	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1729
1730	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1731	free_blocks = BTOBBT(free_bytes);
1732
1733	/*
1734	 * Set the threshold for the minimum number of free blocks in the
1735	 * log to the maximum of what the caller needs, one quarter of the
1736	 * log, and 256 blocks.
1737	 */
1738	free_threshold = BTOBB(need_bytes);
1739	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1740	free_threshold = max(free_threshold, 256);
1741	if (free_blocks >= free_threshold)
1742		return NULLCOMMITLSN;
1743
1744	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1745						&threshold_block);
1746	threshold_block += free_threshold;
1747	if (threshold_block >= log->l_logBBsize) {
1748		threshold_block -= log->l_logBBsize;
1749		threshold_cycle += 1;
1750	}
1751	threshold_lsn = xlog_assign_lsn(threshold_cycle,
1752					threshold_block);
1753	/*
1754	 * Don't pass in an lsn greater than the lsn of the last
1755	 * log record known to be on disk. Use a snapshot of the last sync lsn
1756	 * so that it doesn't change between the compare and the set.
1757	 */
1758	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1759	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1760		threshold_lsn = last_sync_lsn;
1761
1762	return threshold_lsn;
1763}
1764
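/*
 * Worked example for the threshold above (illustrative numbers): a 16 MB
 * log is 32768 basic blocks, so free_threshold is at least
 * max(BTOBB(need_bytes), 32768 >> 2, 256) = 8192 blocks. If fewer blocks
 * are free, the returned LSN is the current tail advanced by 8192 blocks,
 * wrapping the cycle if that runs off the end of the log, and clamped to
 * the last LSN known to be on disk.
 */
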
1765/*
1766 * Push the tail of the log if we need to do so to maintain the free log space
1767 * thresholds set out by xlog_grant_push_threshold.  We may need to adopt a
1768 * policy which pushes on an lsn which is further along in the log once we
1769 * reach the high water mark.  In this manner, we would be creating a low water
1770 * mark.
1771 */
1772STATIC void
1773xlog_grant_push_ail(
1774	struct xlog	*log,
1775	int		need_bytes)
1776{
1777	xfs_lsn_t	threshold_lsn;
1778
1779	threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1780	if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1781		return;
1782
1783	/*
1784	 * Get the transaction layer to kick the dirty buffers out to
1785	 * disk asynchronously. No point in trying to do this if
1786	 * the filesystem is shutting down.
1787	 */
1788	xfs_ail_push(log->l_ailp, threshold_lsn);
1789}
1790
1791/*
1792 * Stamp cycle number in every block
1793 */
1794STATIC void
1795xlog_pack_data(
1796	struct xlog		*log,
1797	struct xlog_in_core	*iclog,
1798	int			roundoff)
1799{
1800	int			i, j, k;
1801	int			size = iclog->ic_offset + roundoff;
1802	__be32			cycle_lsn;
1803	char			*dp;
1804
1805	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1806
1807	dp = iclog->ic_datap;
1808	for (i = 0; i < BTOBB(size); i++) {
1809		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1810			break;
1811		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1812		*(__be32 *)dp = cycle_lsn;
1813		dp += BBSIZE;
1814	}
1815
1816	if (xfs_has_logv2(log->l_mp)) {
1817		xlog_in_core_2_t *xhdr = iclog->ic_data;
1818
1819		for ( ; i < BTOBB(size); i++) {
1820			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1821			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1822			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1823			*(__be32 *)dp = cycle_lsn;
1824			dp += BBSIZE;
1825		}
1826
1827		for (i = 1; i < log->l_iclog_heads; i++)
1828			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1829	}
1830}
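
/*
 * Worked example (illustrative, assuming XLOG_HEADER_CYCLE_SIZE = 32k and
 * BBSIZE = 512 as defined by the log format): each header holds
 * 32768 / 512 = 64 saved cycle words.  For a v2 log with a 256 KiB iclog,
 * the first word of block i is stashed in header i / 64 at slot i % 64,
 * so blocks 0-63 land in h_cycle_data of the main header and later
 * blocks spill into the extended headers xhdr[1..].
 */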
1831
1832/*
1833 * Calculate the checksum for a log buffer.
1834 *
1835 * This is a little more complicated than it should be because the various
1836 * headers and the actual data are non-contiguous.
1837 */
1838__le32
1839xlog_cksum(
1840	struct xlog		*log,
1841	struct xlog_rec_header	*rhead,
1842	char			*dp,
1843	int			size)
1844{
1845	uint32_t		crc;
1846
1847	/* first generate the crc for the record header ... */
1848	crc = xfs_start_cksum_update((char *)rhead,
1849			      sizeof(struct xlog_rec_header),
1850			      offsetof(struct xlog_rec_header, h_crc));
1851
1852	/* ... then for additional cycle data for v2 logs ... */
1853	if (xfs_has_logv2(log->l_mp)) {
1854		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1855		int		i;
1856		int		xheads;
1857
1858		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1859
1860		for (i = 1; i < xheads; i++) {
1861			crc = crc32c(crc, &xhdr[i].hic_xheader,
1862				     sizeof(struct xlog_rec_ext_header));
1863		}
1864	}
1865
1866	/* ... and finally for the payload */
1867	crc = crc32c(crc, dp, size);
1868
1869	return xfs_end_cksum(crc);
1870}
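
/*
 * Usage sketch (illustrative assumption about the recovery side): log
 * recovery recomputes this checksum over a record it has read back and
 * compares it against the CRC stored in the record header, roughly:
 *
 *	if (rhead->h_crc != xlog_cksum(log, rhead, dp, size))
 *		... warn about a torn or corrupt log record ...
 *
 * See the log recovery code for the authoritative policy on mismatches.
 */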
1871
1872static void
1873xlog_bio_end_io(
1874	struct bio		*bio)
1875{
1876	struct xlog_in_core	*iclog = bio->bi_private;
1877
1878	queue_work(iclog->ic_log->l_ioend_workqueue,
1879		   &iclog->ic_end_io_work);
1880}
1881
1882static int
1883xlog_map_iclog_data(
1884	struct bio		*bio,
1885	void			*data,
1886	size_t			count)
1887{
1888	do {
1889		struct page	*page = kmem_to_page(data);
1890		unsigned int	off = offset_in_page(data);
1891		size_t		len = min_t(size_t, count, PAGE_SIZE - off);
1892
1893		if (bio_add_page(bio, page, len, off) != len)
1894			return -EIO;
1895
1896		data += len;
1897		count -= len;
1898	} while (count);
1899
1900	return 0;
1901}
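
/*
 * Note (editorial): iclog data buffers may be kmalloc'ed or vmalloc'ed,
 * so they cannot be assumed physically contiguous.  A 256 KiB iclog on a
 * 4 KiB page machine is therefore mapped as 64 bio segments, one page
 * each, with kmem_to_page() resolving each virtual address to its backing
 * page.  If bio_add_page() rejects a segment, the -EIO return causes the
 * caller to shut down the log.
 */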
1902
1903STATIC void
1904xlog_write_iclog(
1905	struct xlog		*log,
1906	struct xlog_in_core	*iclog,
1907	uint64_t		bno,
1908	unsigned int		count)
1909{
1910	ASSERT(bno < log->l_logBBsize);
1911	trace_xlog_iclog_write(iclog, _RET_IP_);
1912
1913	/*
1914	 * We lock the iclogbufs here so that we can serialise against I/O
1915	 * completion during unmount.  We might be processing a shutdown
1916	 * triggered during unmount, and that can occur asynchronously to the
1917	 * unmount thread, and hence we need to ensure that it completes before
1918	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
1919	 * across the log IO to achieve that.
1920	 */
1921	down(&iclog->ic_sema);
1922	if (xlog_is_shutdown(log)) {
1923		/*
1924		 * It would seem logical to return EIO here, but we rely on
1925		 * the log state machine to propagate I/O errors instead of
1926		 * doing it here.  We kick off the state machine and unlock
1927		 * the buffer manually; the code needs to be kept in sync
1928		 * with the I/O completion path.
1929		 */
1930		xlog_state_done_syncing(iclog);
1931		up(&iclog->ic_sema);
1932		return;
1933	}
1934
1935	/*
1936	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
1937	 * IOs coming immediately after this one. This prevents the block layer
1938	 * writeback throttle from throttling log writes behind background
1939	 * metadata writeback and causing priority inversions.
1940	 */
1941	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1942		 howmany(count, PAGE_SIZE),
1943		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1944	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1945	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1946	iclog->ic_bio.bi_private = iclog;
1947
1948	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1949		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1950		/*
1951		 * For external log devices, we also need to flush the data
1952		 * device cache first to ensure all metadata writeback covered
1953		 * by the LSN in this iclog is on stable storage. This is slow,
1954		 * but it *must* complete before we issue the external log IO.
1955		 *
1956		 * If the flush fails, we cannot conclude that past metadata
1957		 * writeback from the log succeeded.  Repeating the flush is
1958		 * not possible, hence we must shut down with log IO error to
1959		 * avoid shutdown re-entering this path and erroring out again.
1960		 */
1961		if (log->l_targ != log->l_mp->m_ddev_targp &&
1962		    blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) {
1963			xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1964			return;
1965		}
1966	}
1967	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1968		iclog->ic_bio.bi_opf |= REQ_FUA;
1969
1970	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1971
1972	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1973		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1974		return;
1975	}
1976	if (is_vmalloc_addr(iclog->ic_data))
1977		flush_kernel_vmap_range(iclog->ic_data, count);
1978
1979	/*
1980	 * If this log buffer would straddle the end of the log we will have
1981	 * to split it up into two bios, so that we can continue at the start.
1982	 */
1983	if (bno + BTOBB(count) > log->l_logBBsize) {
1984		struct bio *split;
1985
1986		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1987				  GFP_NOIO, &fs_bio_set);
1988		bio_chain(split, &iclog->ic_bio);
1989		submit_bio(split);
1990
1991		/* restart at logical offset zero for the remainder */
1992		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1993	}
1994
1995	submit_bio(&iclog->ic_bio);
1996}
1997
1998/*
1999 * We need to bump cycle number for the part of the iclog that is
2000 * written to the start of the log. Watch out for the header magic
2001 * number case, though.
2002 */
2003static void
2004xlog_split_iclog(
2005	struct xlog		*log,
2006	void			*data,
2007	uint64_t		bno,
2008	unsigned int		count)
2009{
2010	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
2011	unsigned int		i;
2012
2013	for (i = split_offset; i < count; i += BBSIZE) {
2014		uint32_t cycle = get_unaligned_be32(data + i);
2015
2016		if (++cycle == XLOG_HEADER_MAGIC_NUM)
2017			cycle++;
2018		put_unaligned_be32(cycle, data + i);
2019	}
2020}
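
/*
 * Worked example (illustrative, with assumed values): for a log of 1000
 * basic blocks, a write starting at bno = 990 with count = BBTOB(20)
 * bytes wraps by 10 blocks.  split_offset = BBTOB(10) = 5120, so the loop
 * bumps the stamped cycle number in the last ten 512-byte blocks of the
 * buffer - the part that lands at the start of the log - skipping the
 * XLOG_HEADER_MAGIC_NUM value as it goes.
 */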
2021
2022static int
2023xlog_calc_iclog_size(
2024	struct xlog		*log,
2025	struct xlog_in_core	*iclog,
2026	uint32_t		*roundoff)
2027{
2028	uint32_t		count_init, count;
2029
2030	/* Add for LR header */
2031	count_init = log->l_iclog_hsize + iclog->ic_offset;
2032	count = roundup(count_init, log->l_iclog_roundoff);
2033
2034	*roundoff = count - count_init;
2035
2036	ASSERT(count >= count_init);
2037	ASSERT(*roundoff < log->l_iclog_roundoff);
2038	return count;
2039}
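
/*
 * Worked example (illustrative, with assumed values): with a 512 byte
 * record header (l_iclog_hsize), ic_offset = 6000 bytes of data and a
 * 4096 byte stripe unit (l_iclog_roundoff), count_init = 6512 and
 * count = roundup(6512, 4096) = 8192, so *roundoff = 1680 bytes of
 * padding that xlog_sync() must account to the ticket or grant heads.
 */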
2040
2041/*
2042 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2043 * fashion.  By this point, the caller should already have moved the current
2044 * iclog ptr in the log to point to the next available iclog.  This allows
2045 * further writes to continue while this code syncs out an iclog ready to go.
2046 * Before an in-core log can be written out, the data section must be scanned
2047 * to save away the 1st word of each BBSIZE block into the header.  We replace
2048 * it with the current cycle count.  Each BBSIZE block is tagged with the
2049 * cycle count because there is an implicit assumption that drives will
2050 * guarantee that entire 512 byte blocks get written at once.  In other words,
2051 * we can't have part of a 512 byte block written and part not written.  By
2052 * tagging each block, we will know which blocks are valid when recovering
2053 * after an unclean shutdown.
2054 *
2055 * This routine is single threaded on the iclog.  No other thread can be in
2056 * this routine with the same iclog.  Changing contents of iclog can
2057 * therefore be done without grabbing the state machine lock.  Updating the global
2058 * log will require grabbing the lock though.
2059 *
2060 * The entire log manager uses a logical block numbering scheme.  Only
2061 * xlog_write_iclog knows about the fact that the log may not start with
2062 * block zero on a given device.
2063 */
2064STATIC void
2065xlog_sync(
2066	struct xlog		*log,
2067	struct xlog_in_core	*iclog,
2068	struct xlog_ticket	*ticket)
2069{
2070	unsigned int		count;		/* byte count of bwrite */
2071	unsigned int		roundoff;       /* roundoff to BB or stripe */
2072	uint64_t		bno;
2073	unsigned int		size;
2074
2075	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2076	trace_xlog_iclog_sync(iclog, _RET_IP_);
2077
2078	count = xlog_calc_iclog_size(log, iclog, &roundoff);
2079
2080	/*
2081	 * If we have a ticket, account for the roundoff via the ticket
2082	 * reservation to avoid touching the hot grant heads needlessly.
2083	 * Otherwise, we have to move grant heads directly.
2084	 */
2085	if (ticket) {
2086		ticket->t_curr_res -= roundoff;
2087	} else {
2088		xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2089		xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2090	}
2091
2092	/* put cycle number in every block */
2093	xlog_pack_data(log, iclog, roundoff);
2094
2095	/* real byte length */
2096	size = iclog->ic_offset;
2097	if (xfs_has_logv2(log->l_mp))
2098		size += roundoff;
2099	iclog->ic_header.h_len = cpu_to_be32(size);
2100
2101	XFS_STATS_INC(log->l_mp, xs_log_writes);
2102	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2103
2104	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2105
2106	/* Do we need to split this write into 2 parts? */
2107	if (bno + BTOBB(count) > log->l_logBBsize)
2108		xlog_split_iclog(log, &iclog->ic_header, bno, count);
2109
2110	/* calculate the checksum */
2111	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2112					    iclog->ic_datap, size);
2113	/*
2114	 * Intentionally corrupt the log record CRC based on the error injection
2115	 * frequency, if defined. This facilitates testing log recovery in the
2116	 * event of torn writes. Hence, set the IOABORT state to abort the log
2117	 * write on I/O completion and shutdown the fs. The subsequent mount
2118	 * detects the bad CRC and attempts to recover.
2119	 */
2120#ifdef DEBUG
2121	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2122		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2123		iclog->ic_fail_crc = true;
2124		xfs_warn(log->l_mp,
2125	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2126			 be64_to_cpu(iclog->ic_header.h_lsn));
2127	}
2128#endif
2129	xlog_verify_iclog(log, iclog, count);
2130	xlog_write_iclog(log, iclog, bno, count);
2131}
2132
2133/*
2134 * Deallocate a log structure
2135 */
2136STATIC void
2137xlog_dealloc_log(
2138	struct xlog	*log)
2139{
2140	xlog_in_core_t	*iclog, *next_iclog;
2141	int		i;
2142
2143	/*
2144	 * Destroy the CIL after waiting for iclog IO completion because an
2145	 * iclog EIO error will try to shut down the log, which accesses the
2146	 * CIL to wake up the waiters.
2147	 */
2148	xlog_cil_destroy(log);
2149
2150	iclog = log->l_iclog;
2151	for (i = 0; i < log->l_iclog_bufs; i++) {
2152		next_iclog = iclog->ic_next;
2153		kmem_free(iclog->ic_data);
2154		kmem_free(iclog);
2155		iclog = next_iclog;
2156	}
2157
2158	log->l_mp->m_log = NULL;
2159	destroy_workqueue(log->l_ioend_workqueue);
2160	kmem_free(log);
2161}
2162
2163/*
2164 * Update counters atomically now that memcpy is done.
2165 */
2166static inline void
2167xlog_state_finish_copy(
2168	struct xlog		*log,
2169	struct xlog_in_core	*iclog,
2170	int			record_cnt,
2171	int			copy_bytes)
2172{
2173	lockdep_assert_held(&log->l_icloglock);
2174
2175	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2176	iclog->ic_offset += copy_bytes;
2177}
2178
2179/*
2180 * print out info relating to regions written which consume
2181 * the reservation
2182 */
2183void
2184xlog_print_tic_res(
2185	struct xfs_mount	*mp,
2186	struct xlog_ticket	*ticket)
2187{
2188	xfs_warn(mp, "ticket reservation summary:");
2189	xfs_warn(mp, "  unit res    = %d bytes", ticket->t_unit_res);
2190	xfs_warn(mp, "  current res = %d bytes", ticket->t_curr_res);
2191	xfs_warn(mp, "  original count  = %d", ticket->t_ocnt);
2192	xfs_warn(mp, "  remaining count = %d", ticket->t_cnt);
2193}
2194
2195/*
2196 * Print a summary of the transaction.
2197 */
2198void
2199xlog_print_trans(
2200	struct xfs_trans	*tp)
2201{
2202	struct xfs_mount	*mp = tp->t_mountp;
2203	struct xfs_log_item	*lip;
2204
2205	/* dump core transaction and ticket info */
2206	xfs_warn(mp, "transaction summary:");
2207	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
2208	xfs_warn(mp, "  log count = %d", tp->t_log_count);
2209	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
2210
2211	xlog_print_tic_res(mp, tp->t_ticket);
2212
2213	/* dump each log item */
2214	list_for_each_entry(lip, &tp->t_items, li_trans) {
2215		struct xfs_log_vec	*lv = lip->li_lv;
2216		struct xfs_log_iovec	*vec;
2217		int			i;
2218
2219		xfs_warn(mp, "log item: ");
2220		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
2221		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
2222		if (!lv)
2223			continue;
2224		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
2225		xfs_warn(mp, "  size	= %d", lv->lv_size);
2226		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
2227		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
2228
2229		/* dump each iovec for the log item */
2230		vec = lv->lv_iovecp;
2231		for (i = 0; i < lv->lv_niovecs; i++) {
2232			int dumplen = min(vec->i_len, 32);
2233
2234			xfs_warn(mp, "  iovec[%d]", i);
2235			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
2236			xfs_warn(mp, "    len	= %d", vec->i_len);
2237			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
2238			xfs_hex_dump(vec->i_addr, dumplen);
2239
2240			vec++;
2241		}
2242	}
2243}
2244
2245static inline void
2246xlog_write_iovec(
2247	struct xlog_in_core	*iclog,
2248	uint32_t		*log_offset,
2249	void			*data,
2250	uint32_t		write_len,
2251	int			*bytes_left,
2252	uint32_t		*record_cnt,
2253	uint32_t		*data_cnt)
2254{
2255	ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2256	ASSERT(*log_offset % sizeof(int32_t) == 0);
2257	ASSERT(write_len % sizeof(int32_t) == 0);
2258
2259	memcpy(iclog->ic_datap + *log_offset, data, write_len);
2260	*log_offset += write_len;
2261	*bytes_left -= write_len;
2262	(*record_cnt)++;
2263	*data_cnt += write_len;
2264}
2265
2266/*
2267 * Write log vectors into a single iclog which is guaranteed by the caller
2268 * to have enough space to write the entire log vector into.
2269 */
2270static void
2271xlog_write_full(
2272	struct xfs_log_vec	*lv,
2273	struct xlog_ticket	*ticket,
2274	struct xlog_in_core	*iclog,
2275	uint32_t		*log_offset,
2276	uint32_t		*len,
2277	uint32_t		*record_cnt,
2278	uint32_t		*data_cnt)
2279{
2280	int			index;
2281
2282	ASSERT(*log_offset + *len <= iclog->ic_size ||
2283		iclog->ic_state == XLOG_STATE_WANT_SYNC);
2284
2285	/*
2286	 * Ordered log vectors have no regions to write so this
2287	 * loop will naturally skip them.
2288	 */
2289	for (index = 0; index < lv->lv_niovecs; index++) {
2290		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
2291		struct xlog_op_header	*ophdr = reg->i_addr;
2292
2293		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2294		xlog_write_iovec(iclog, log_offset, reg->i_addr,
2295				reg->i_len, len, record_cnt, data_cnt);
2296	}
2297}
2298
2299static int
2300xlog_write_get_more_iclog_space(
2301	struct xlog_ticket	*ticket,
2302	struct xlog_in_core	**iclogp,
2303	uint32_t		*log_offset,
2304	uint32_t		len,
2305	uint32_t		*record_cnt,
2306	uint32_t		*data_cnt)
2307{
2308	struct xlog_in_core	*iclog = *iclogp;
2309	struct xlog		*log = iclog->ic_log;
2310	int			error;
2311
2312	spin_lock(&log->l_icloglock);
2313	ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2314	xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2315	error = xlog_state_release_iclog(log, iclog, ticket);
2316	spin_unlock(&log->l_icloglock);
2317	if (error)
2318		return error;
2319
2320	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2321					log_offset);
2322	if (error)
2323		return error;
2324	*record_cnt = 0;
2325	*data_cnt = 0;
2326	*iclogp = iclog;
2327	return 0;
2328}
2329
2330/*
2331 * Write log vectors into a single iclog which is smaller than the current chain
2332 * length. We write until we cannot fit a full record into the remaining space
2333 * and then stop. We return the log vector that is to be written that cannot
2334 * wholly fit in the iclog.
2335 */
2336static int
2337xlog_write_partial(
2338	struct xfs_log_vec	*lv,
2339	struct xlog_ticket	*ticket,
2340	struct xlog_in_core	**iclogp,
2341	uint32_t		*log_offset,
2342	uint32_t		*len,
2343	uint32_t		*record_cnt,
2344	uint32_t		*data_cnt)
2345{
2346	struct xlog_in_core	*iclog = *iclogp;
2347	struct xlog_op_header	*ophdr;
2348	int			index = 0;
2349	uint32_t		rlen;
2350	int			error;
2351
2352	/* walk the logvec, copying until we run out of space in the iclog */
2353	for (index = 0; index < lv->lv_niovecs; index++) {
2354		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];
2355		uint32_t		reg_offset = 0;
2356
2357		/*
2358		 * The first region of a continuation must have a non-zero
2359		 * length otherwise log recovery will just skip over it and
2360		 * start recovering from the next opheader it finds. Because we
2361		 * mark the next opheader as a continuation, recovery will then
2362		 * incorrectly add the continuation to the previous region and
2363		 * that breaks stuff.
2364		 *
2365		 * Hence if there isn't space for region data after the
2366		 * opheader, then we need to start afresh with a new iclog.
2367		 */
2368		if (iclog->ic_size - *log_offset <=
2369					sizeof(struct xlog_op_header)) {
2370			error = xlog_write_get_more_iclog_space(ticket,
2371					&iclog, log_offset, *len, record_cnt,
2372					data_cnt);
2373			if (error)
2374				return error;
2375		}
2376
2377		ophdr = reg->i_addr;
2378		rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2379
2380		ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2381		ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2382		if (rlen != reg->i_len)
2383			ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2384
2385		xlog_write_iovec(iclog, log_offset, reg->i_addr,
2386				rlen, len, record_cnt, data_cnt);
2387
2388		/* If we wrote the whole region, move to the next. */
2389		if (rlen == reg->i_len)
2390			continue;
2391
2392		/*
2393		 * We now have a partially written iovec, but it can span
2394		 * multiple iclogs so we loop here. First we release the iclog
2395		 * we currently have, then we get a new iclog and add a new
2396		 * opheader. Then we continue copying from where we were until
2397		 * we either complete the iovec or fill the iclog. If we
2398		 * complete the iovec, then we increment the index and go right
2399		 * back to the top of the outer loop.  If we fill the iclog, we
2400		 * run the inner loop again.
2401		 *
2402		 * This is complicated by the tail of a region using all the
2403		 * space in an iclog and hence requiring us to release the iclog
2404		 * and get a new one before returning to the outer loop. We must
2405		 * always guarantee that we exit this inner loop with at least
2406		 * space for log transaction opheaders left in the current
2407		 * iclog, hence we cannot just terminate the loop at the end
2408		 * of the continuation.  So we loop while there is no
2409		 * space left in the current iclog, and check for the end of the
2410		 * continuation after getting a new iclog.
2411		 */
2412		do {
2413			/*
2414			 * Ensure we include the continuation opheader in the
2415			 * space we need in the new iclog by adding that size
2416			 * to the length we require. This continuation opheader
2417			 * needs to be accounted to the ticket as the space it
2418			 * consumes hasn't been accounted to the lv we are
2419			 * writing.
2420			 */
2421			error = xlog_write_get_more_iclog_space(ticket,
2422					&iclog, log_offset,
2423					*len + sizeof(struct xlog_op_header),
2424					record_cnt, data_cnt);
2425			if (error)
2426				return error;
2427
2428			ophdr = iclog->ic_datap + *log_offset;
2429			ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2430			ophdr->oh_clientid = XFS_TRANSACTION;
2431			ophdr->oh_res2 = 0;
2432			ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2433
2434			ticket->t_curr_res -= sizeof(struct xlog_op_header);
2435			*log_offset += sizeof(struct xlog_op_header);
2436			*data_cnt += sizeof(struct xlog_op_header);
2437
2438			/*
2439			 * If rlen fits in the iclog, then end the region
2440			 * continuation. Otherwise we're going around again.
2441			 */
2442			reg_offset += rlen;
2443			rlen = reg->i_len - reg_offset;
2444			if (rlen <= iclog->ic_size - *log_offset)
2445				ophdr->oh_flags |= XLOG_END_TRANS;
2446			else
2447				ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2448
2449			rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2450			ophdr->oh_len = cpu_to_be32(rlen);
2451
2452			xlog_write_iovec(iclog, log_offset,
2453					reg->i_addr + reg_offset,
2454					rlen, len, record_cnt, data_cnt);
2455
2456		} while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2457	}
2458
2459	/*
2460	 * No more iovecs remain in this logvec so return the next log vec to
2461	 * the caller so it can go back to fast path copying.
2462	 */
2463	*iclogp = iclog;
2464	return 0;
2465}
2466
2467/*
2468 * Write some region out to in-core log
2469 *
2470 * This will be called when writing externally provided regions or when
2471 * writing out a commit record for a given transaction.
2472 *
2473 * General algorithm:
2474 *	1. Find total length of this write.  This may include adding to the
2475 *		lengths passed in.
2476 *	2. Check whether we violate the ticket's reservation.
2477 *	3. While writing to this iclog
2478 *	    A. Reserve as much space in this iclog as we can get
2479 *	    B. If this is first write, save away start lsn
2480 *	    C. While writing this region:
2481 *		1. If first write of transaction, write start record
2482 *		2. Write log operation header (header per region)
2483 *		3. Find out if we can fit entire region into this iclog
2484 *		4. Potentially, verify destination memcpy ptr
2485 *		5. Memcpy (partial) region
2486 *		6. If partial copy, release iclog; otherwise, continue
2487 *			copying more regions into current iclog
2488 *	4. Mark want sync bit (in simulation mode)
2489 *	5. Release iclog for potential flush to on-disk log.
2490 *
2491 * ERRORS:
2492 * 1.	Panic if reservation is overrun.  This should never happen since
2493 *	reservation amounts are generated internal to the filesystem.
2494 * NOTES:
2495 * 1. Tickets are single threaded data structures.
2496 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2497 *	syncing routine.  When a single log_write region needs to span
2498 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2499 *	on all log operation writes which don't contain the end of the
2500 *	region.  The XLOG_END_TRANS bit is used for the in-core log
2501 *	operation which contains the end of the continued log_write region.
2502 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2503 *	we don't really know exactly how much space will be used.  As a result,
2504 *	we don't update ic_offset until the end when we know exactly how many
2505 *	bytes have been written out.
2506 */
2507int
2508xlog_write(
2509	struct xlog		*log,
2510	struct xfs_cil_ctx	*ctx,
2511	struct list_head	*lv_chain,
2512	struct xlog_ticket	*ticket,
2513	uint32_t		len)
2514
2515{
2516	struct xlog_in_core	*iclog = NULL;
2517	struct xfs_log_vec	*lv;
2518	uint32_t		record_cnt = 0;
2519	uint32_t		data_cnt = 0;
2520	int			error = 0;
2521	int			log_offset;
2522
2523	if (ticket->t_curr_res < 0) {
2524		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2525		     "ctx ticket reservation ran out. Need to up reservation");
2526		xlog_print_tic_res(log->l_mp, ticket);
2527		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2528	}
2529
2530	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2531					   &log_offset);
2532	if (error)
2533		return error;
2534
2535	ASSERT(log_offset <= iclog->ic_size - 1);
2536
2537	/*
2538	 * If we have a context pointer, pass it the first iclog we are
2539	 * writing to so it can record state needed for iclog write
2540	 * ordering.
2541	 */
2542	if (ctx)
2543		xlog_cil_set_ctx_write_state(ctx, iclog);
2544
2545	list_for_each_entry(lv, lv_chain, lv_list) {
2546		/*
2547		 * If the entire log vec does not fit in the iclog, punt it to
2548		 * the partial copy loop which can handle this case.
2549		 */
2550		if (lv->lv_niovecs &&
2551		    lv->lv_bytes > iclog->ic_size - log_offset) {
2552			error = xlog_write_partial(lv, ticket, &iclog,
2553					&log_offset, &len, &record_cnt,
2554					&data_cnt);
2555			if (error) {
2556				/*
2557				 * We have no iclog to release, so just return
2558				 * the error immediately.
2559				 */
2560				return error;
2561			}
2562		} else {
2563			xlog_write_full(lv, ticket, iclog, &log_offset,
2564					 &len, &record_cnt, &data_cnt);
2565		}
2566	}
2567	ASSERT(len == 0);
2568
2569	/*
2570	 * We've already been guaranteed that the last writes will fit inside
2571	 * the current iclog, and hence it will already have the space used by
2572	 * those writes accounted to it. Hence we do not need to update the
2573	 * iclog with the number of bytes written here.
2574	 */
2575	spin_lock(&log->l_icloglock);
2576	xlog_state_finish_copy(log, iclog, record_cnt, 0);
2577	error = xlog_state_release_iclog(log, iclog, ticket);
2578	spin_unlock(&log->l_icloglock);
2579
2580	return error;
2581}
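
/*
 * Usage sketch (illustrative; the caller-side names are assumptions):
 * the CIL push is the expected caller, handing over the checkpoint's
 * chained log vectors together with its ticket, roughly:
 *
 *	error = xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, len);
 *
 * where len is the pre-computed byte count of the whole chain.
 */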
2582
2583static void
2584xlog_state_activate_iclog(
2585	struct xlog_in_core	*iclog,
2586	int			*iclogs_changed)
2587{
2588	ASSERT(list_empty_careful(&iclog->ic_callbacks));
2589	trace_xlog_iclog_activate(iclog, _RET_IP_);
2590
2591	/*
2592	 * If the number of ops in this iclog indicates it just contains the
2593	 * dummy transaction, we can change state into IDLE (the second time
2594	 * around).  Otherwise we should change the state to NEED a dummy record.
2595	 * We don't need to cover the dummy.
2596	 */
2597	if (*iclogs_changed == 0 &&
2598	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2599		*iclogs_changed = 1;
2600	} else {
2601		/*
2602		 * We have two dirty iclogs so start over.  This could also be
2603		 * num of ops indicating this is not the dummy going out.
2604		 */
2605		*iclogs_changed = 2;
2606	}
2607
2608	iclog->ic_state	= XLOG_STATE_ACTIVE;
2609	iclog->ic_offset = 0;
2610	iclog->ic_header.h_num_logops = 0;
2611	memset(iclog->ic_header.h_cycle_data, 0,
2612		sizeof(iclog->ic_header.h_cycle_data));
2613	iclog->ic_header.h_lsn = 0;
2614	iclog->ic_header.h_tail_lsn = 0;
2615}
2616
2617/*
2618 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2619 * ACTIVE after iclog I/O has completed.
2620 */
2621static void
2622xlog_state_activate_iclogs(
2623	struct xlog		*log,
2624	int			*iclogs_changed)
2625{
2626	struct xlog_in_core	*iclog = log->l_iclog;
2627
2628	do {
2629		if (iclog->ic_state == XLOG_STATE_DIRTY)
2630			xlog_state_activate_iclog(iclog, iclogs_changed);
2631		/*
2632		 * The ordering of marking iclogs ACTIVE must be maintained, so
2633		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2634		 */
2635		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2636			break;
2637	} while ((iclog = iclog->ic_next) != log->l_iclog);
2638}
2639
2640static int
2641xlog_covered_state(
2642	int			prev_state,
2643	int			iclogs_changed)
2644{
2645	/*
2646	 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2647	 * wrote the first covering record (DONE). We go to IDLE if we just
2648	 * wrote the second covering record (DONE2) and remain in IDLE until a
2649	 * non-covering write occurs.
2650	 */
2651	switch (prev_state) {
2652	case XLOG_STATE_COVER_IDLE:
2653		if (iclogs_changed == 1)
2654			return XLOG_STATE_COVER_IDLE;
2655		fallthrough;
2656	case XLOG_STATE_COVER_NEED:
2657	case XLOG_STATE_COVER_NEED2:
2658		break;
2659	case XLOG_STATE_COVER_DONE:
2660		if (iclogs_changed == 1)
2661			return XLOG_STATE_COVER_NEED2;
2662		break;
2663	case XLOG_STATE_COVER_DONE2:
2664		if (iclogs_changed == 1)
2665			return XLOG_STATE_COVER_IDLE;
2666		break;
2667	default:
2668		ASSERT(0);
2669	}
2670
2671	return XLOG_STATE_COVER_NEED;
2672}
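
/*
 * Worked example (editorial): covering an idle log takes two passes.
 * After the first dummy record is written the state sits at
 * XLOG_STATE_COVER_DONE; cleaning that iclog with iclogs_changed == 1
 * advances it to COVER_NEED2, and the second pass advances COVER_DONE2
 * to COVER_IDLE.  Any non-covering write (iclogs_changed == 2) drops
 * the state back to COVER_NEED.
 */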
2673
2674STATIC void
2675xlog_state_clean_iclog(
2676	struct xlog		*log,
2677	struct xlog_in_core	*dirty_iclog)
2678{
2679	int			iclogs_changed = 0;
2680
2681	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2682
2683	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2684
2685	xlog_state_activate_iclogs(log, &iclogs_changed);
2686	wake_up_all(&dirty_iclog->ic_force_wait);
2687
2688	if (iclogs_changed) {
2689		log->l_covered_state = xlog_covered_state(log->l_covered_state,
2690				iclogs_changed);
2691	}
2692}
2693
2694STATIC xfs_lsn_t
2695xlog_get_lowest_lsn(
2696	struct xlog		*log)
2697{
2698	struct xlog_in_core	*iclog = log->l_iclog;
2699	xfs_lsn_t		lowest_lsn = 0, lsn;
2700
2701	do {
2702		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2703		    iclog->ic_state == XLOG_STATE_DIRTY)
2704			continue;
2705
2706		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2707		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2708			lowest_lsn = lsn;
2709	} while ((iclog = iclog->ic_next) != log->l_iclog);
2710
2711	return lowest_lsn;
2712}
2713
2714/*
2715 * Completion of an iclog IO does not imply that a transaction has completed, as
2716 * transactions can be large enough to span many iclogs. We cannot change the
2717 * tail of the log half way through a transaction as this may be the only
2718 * transaction in the log and moving the tail to point to the middle of it
2719 * will prevent recovery from finding the start of the transaction. Hence we
2720 * should only update the last_sync_lsn if this iclog contains transaction
2721 * completion callbacks on it.
2722 *
2723 * We have to do this before we drop the icloglock to ensure we are the only one
2724 * that can update it.
2725 *
2726 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2727 * the reservation grant head pushing. This is due to the fact that the push
2728 * target is bound by the current last_sync_lsn value. Hence if we have a large
2729 * amount of log space bound up in this committing transaction then the
2730 * last_sync_lsn value may be the limiting factor preventing tail pushing from
2731 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2732 * should push the AIL to ensure the push target (and hence the grant head) is
2733 * no longer bound by the old log head location and can move forwards and make
2734 * progress again.
2735 */
2736static void
2737xlog_state_set_callback(
2738	struct xlog		*log,
2739	struct xlog_in_core	*iclog,
2740	xfs_lsn_t		header_lsn)
2741{
2742	trace_xlog_iclog_callback(iclog, _RET_IP_);
2743	iclog->ic_state = XLOG_STATE_CALLBACK;
2744
2745	ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2746			   header_lsn) <= 0);
2747
2748	if (list_empty_careful(&iclog->ic_callbacks))
2749		return;
2750
2751	atomic64_set(&log->l_last_sync_lsn, header_lsn);
2752	xlog_grant_push_ail(log, 0);
2753}
2754
2755/*
2756 * Return true if we need to stop processing, false to continue to the next
2757 * iclog. The caller will need to run callbacks if the iclog is returned in the
2758 * XLOG_STATE_CALLBACK state.
2759 */
2760static bool
2761xlog_state_iodone_process_iclog(
2762	struct xlog		*log,
2763	struct xlog_in_core	*iclog)
2764{
2765	xfs_lsn_t		lowest_lsn;
2766	xfs_lsn_t		header_lsn;
2767
2768	switch (iclog->ic_state) {
2769	case XLOG_STATE_ACTIVE:
2770	case XLOG_STATE_DIRTY:
2771		/*
2772		 * Skip all iclogs in the ACTIVE & DIRTY states:
2773		 */
2774		return false;
2775	case XLOG_STATE_DONE_SYNC:
2776		/*
2777		 * Now that we have an iclog that is in the DONE_SYNC state, do
2778		 * one more check here to see if we have chased our tail around.
2779		 * If this is not the lowest lsn iclog, then we will leave it
2780		 * for another completion to process.
2781		 */
2782		header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2783		lowest_lsn = xlog_get_lowest_lsn(log);
2784		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2785			return false;
2786		xlog_state_set_callback(log, iclog, header_lsn);
2787		return false;
2788	default:
2789		/*
2790		 * Can only perform callbacks in order.  Since this iclog is not
2791		 * in the DONE_SYNC state, we skip the rest and just try to
2792		 * clean up.
2793		 */
2794		return true;
2795	}
2796}
2797
2798/*
2799 * Loop over all the iclogs, running attached callbacks on them. Return true if
2800 * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2801 * to handle transient shutdown state here at all because
2802 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2803 * cleanup of the callbacks.
2804 */
2805static bool
2806xlog_state_do_iclog_callbacks(
2807	struct xlog		*log)
2808		__releases(&log->l_icloglock)
2809		__acquires(&log->l_icloglock)
2810{
2811	struct xlog_in_core	*first_iclog = log->l_iclog;
2812	struct xlog_in_core	*iclog = first_iclog;
2813	bool			ran_callback = false;
2814
2815	do {
2816		LIST_HEAD(cb_list);
2817
2818		if (xlog_state_iodone_process_iclog(log, iclog))
2819			break;
2820		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2821			iclog = iclog->ic_next;
2822			continue;
2823		}
2824		list_splice_init(&iclog->ic_callbacks, &cb_list);
2825		spin_unlock(&log->l_icloglock);
2826
2827		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2828		xlog_cil_process_committed(&cb_list);
2829		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2830		ran_callback = true;
2831
2832		spin_lock(&log->l_icloglock);
2833		xlog_state_clean_iclog(log, iclog);
2834		iclog = iclog->ic_next;
2835	} while (iclog != first_iclog);
2836
2837	return ran_callback;
2838}
2839
2840
2841/*
2842 * Loop running iclog completion callbacks until there are no more iclogs in a
2843 * state that can run callbacks.
2844 */
2845STATIC void
2846xlog_state_do_callback(
2847	struct xlog		*log)
2848{
2849	int			flushcnt = 0;
2850	int			repeats = 0;
2851
2852	spin_lock(&log->l_icloglock);
2853	while (xlog_state_do_iclog_callbacks(log)) {
2854		if (xlog_is_shutdown(log))
2855			break;
2856
2857		if (++repeats > 5000) {
2858			flushcnt += repeats;
2859			repeats = 0;
2860			xfs_warn(log->l_mp,
2861				"%s: possible infinite loop (%d iterations)",
2862				__func__, flushcnt);
2863		}
2864	}
2865
2866	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2867		wake_up_all(&log->l_flush_wait);
2868
2869	spin_unlock(&log->l_icloglock);
2870}
2871
2872
2873/*
2874 * Finish transitioning this iclog to the dirty state.
2875 *
2876 * Callbacks could take time, so they are done outside the scope of the
2877 * global state machine log lock.
2878 */
2879STATIC void
2880xlog_state_done_syncing(
2881	struct xlog_in_core	*iclog)
2882{
2883	struct xlog		*log = iclog->ic_log;
2884
2885	spin_lock(&log->l_icloglock);
2886	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2887	trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2888
2889	/*
2890	 * If we got an error, either on the first buffer, or in the case of
2891	 * split log writes, on the second, we shut down the file system and
2892	 * no iclogs should ever be attempted to be written to disk again.
2893	 */
2894	if (!xlog_is_shutdown(log)) {
2895		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2896		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2897	}
2898
2899	/*
2900	 * Someone could be sleeping prior to writing out the next
2901	 * iclog buffer; we wake them all.  One will get to do the
2902	 * I/O, the others get to wait for the result.
2903	 */
2904	wake_up_all(&iclog->ic_write_wait);
2905	spin_unlock(&log->l_icloglock);
2906	xlog_state_do_callback(log);
2907}
2908
2909/*
2910 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2911 * sleep.  We wait on the flush queue on the head iclog as that should be
2912 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2913 * we will wait here and all new writes will sleep until a sync completes.
2914 *
2915 * The in-core logs are used in a circular fashion. They are not used
2916 * out-of-order even when an iclog past the head is free.
2917 *
2918 * return:
2919 *	* log_offset where xlog_write() can start writing into the in-core
2920 *		log's data space.
2921 *	* in-core log pointer to which xlog_write() should write.
2922 *	* boolean indicating this is a continued write to an in-core log.
2923 *		If this is the last write, then the in-core log's offset field
2924 *		needs to be incremented, depending on the amount of data which
2925 *		is copied.
2926 */
2927STATIC int
2928xlog_state_get_iclog_space(
2929	struct xlog		*log,
2930	int			len,
2931	struct xlog_in_core	**iclogp,
2932	struct xlog_ticket	*ticket,
2933	int			*logoffsetp)
2934{
2935	int		  log_offset;
2936	xlog_rec_header_t *head;
2937	xlog_in_core_t	  *iclog;
2938
2939restart:
2940	spin_lock(&log->l_icloglock);
2941	if (xlog_is_shutdown(log)) {
2942		spin_unlock(&log->l_icloglock);
2943		return -EIO;
2944	}
2945
2946	iclog = log->l_iclog;
2947	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2948		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2949
2950		/* Wait for log writes to have flushed */
2951		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2952		goto restart;
2953	}
2954
2955	head = &iclog->ic_header;
2956
2957	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
2958	log_offset = iclog->ic_offset;
2959
2960	trace_xlog_iclog_get_space(iclog, _RET_IP_);
2961
2962	/* On the 1st write to an iclog, figure out lsn.  This works
2963	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2964	 * committing to.  If the offset is set, that's how many blocks
2965	 * must be written.
2966	 */
2967	if (log_offset == 0) {
2968		ticket->t_curr_res -= log->l_iclog_hsize;
2969		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2970		head->h_lsn = cpu_to_be64(
2971			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2972		ASSERT(log->l_curr_block >= 0);
2973	}
2974
2975	/* If there is enough room to write everything, then do it.  Otherwise,
2976	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2977	 * bit is on, so this will get flushed out.  Don't update ic_offset
2978	 * until you know exactly how many bytes get copied.  Therefore, wait
2979	 * until later to update ic_offset.
2980	 *
2981	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2982	 * can fit into remaining data section.
2983	 */
2984	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2985		int		error = 0;
2986
2987		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2988
2989		/*
2990		 * If we are the only one writing to this iclog, sync it to
2991		 * disk.  We need to do an atomic compare and decrement here to
2992		 * avoid racing with concurrent atomic_dec_and_lock() calls in
2993		 * xlog_state_release_iclog() when there is more than one
2994		 * reference to the iclog.
2995		 */
2996		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2997			error = xlog_state_release_iclog(log, iclog, ticket);
2998		spin_unlock(&log->l_icloglock);
2999		if (error)
3000			return error;
3001		goto restart;
3002	}
3003
3004	/* Do we have enough room to write the full amount in the remainder
3005	 * of this iclog?  Or must we continue a write on the next iclog and
3006	 * mark this iclog as completely taken?  In the case where we switch
3007	 * iclogs (to mark it taken), this particular iclog will release/sync
3008	 * to disk in xlog_write().
3009	 */
3010	if (len <= iclog->ic_size - iclog->ic_offset)
3011		iclog->ic_offset += len;
3012	else
3013		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3014	*iclogp = iclog;
3015
3016	ASSERT(iclog->ic_offset <= iclog->ic_size);
3017	spin_unlock(&log->l_icloglock);
3018
3019	*logoffsetp = log_offset;
3020	return 0;
3021}
3022
3023/*
3024 * The first cnt-1 times a ticket goes through here we don't need to move the
3025 * grant write head because the permanent reservation has reserved cnt times the
3026 * unit amount.  Release part of current permanent unit reservation and reset
3027 * current reservation to be one unit's worth.  Also move the grant reservation
3028 * forward.
3029 */
3030void
3031xfs_log_ticket_regrant(
3032	struct xlog		*log,
3033	struct xlog_ticket	*ticket)
3034{
3035	trace_xfs_log_ticket_regrant(log, ticket);
3036
3037	if (ticket->t_cnt > 0)
3038		ticket->t_cnt--;
3039
3040	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3041					ticket->t_curr_res);
3042	xlog_grant_sub_space(log, &log->l_write_head.grant,
3043					ticket->t_curr_res);
3044	ticket->t_curr_res = ticket->t_unit_res;
3045
3046	trace_xfs_log_ticket_regrant_sub(log, ticket);
3047
3048	/* re-grant a unit's worth of reservation if the pre-reserved space is gone */
3049	if (!ticket->t_cnt) {
3050		xlog_grant_add_space(log, &log->l_reserve_head.grant,
3051				     ticket->t_unit_res);
3052		trace_xfs_log_ticket_regrant_exit(log, ticket);
3053
3054		ticket->t_curr_res = ticket->t_unit_res;
3055	}
3056
3057	xfs_log_ticket_put(ticket);
3058}
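
/*
 * Worked example (illustrative, with assumed values): a permanent ticket
 * with t_cnt = 3 and t_unit_res = 100000 that finishes a transaction with
 * t_curr_res = 40000 unused gives those 40000 bytes back to both grant
 * heads, resets t_curr_res to the unit size and drops t_cnt to 2; nothing
 * is re-granted while pre-reserved units remain.  Once t_cnt reaches
 * zero, a full unit is granted back onto the reserve head for the next
 * transaction in the rolling series.
 */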
3059
3060/*
3061 * Give back the space left from a reservation.
3062 *
3063 * All the information we need to make a correct determination of space left
3064 * is present.  For non-permanent reservations, things are quite easy.  The
3065 * count should have been decremented to zero.  We only need to deal with the
3066 * space remaining in the current reservation part of the ticket.  If the
3067 * ticket contains a permanent reservation, there may be left over space which
3068 * needs to be released.  A count of N means that N-1 refills of the current
3069 * reservation can be done before we need to ask for more space.  The first
3070 * one goes to fill up the first current reservation.  Once we run out of
3071 * space, the count will stay at zero and the only space remaining will be
3072 * in the current reservation field.
3073 */
3074void
3075xfs_log_ticket_ungrant(
3076	struct xlog		*log,
3077	struct xlog_ticket	*ticket)
3078{
3079	int			bytes;
3080
3081	trace_xfs_log_ticket_ungrant(log, ticket);
3082
3083	if (ticket->t_cnt > 0)
3084		ticket->t_cnt--;
3085
3086	trace_xfs_log_ticket_ungrant_sub(log, ticket);
3087
3088	/*
3089	 * If this is a permanent reservation ticket, we may be able to free
3090	 * up more space based on the remaining count.
3091	 */
3092	bytes = ticket->t_curr_res;
3093	if (ticket->t_cnt > 0) {
3094		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3095		bytes += ticket->t_unit_res*ticket->t_cnt;
3096	}
3097
3098	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3099	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3100
3101	trace_xfs_log_ticket_ungrant_exit(log, ticket);
3102
3103	xfs_log_space_wake(log->l_mp);
3104	xfs_log_ticket_put(ticket);
3105}
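
/*
 * Worked example (illustrative, with assumed values): ungranting a
 * permanent ticket with t_cnt = 2, t_unit_res = 100000 and
 * t_curr_res = 30000 first drops t_cnt to 1, then releases
 * bytes = 30000 + 1 * 100000 = 130000 from both grant heads before
 * waking any waiters on log space.
 */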
3106
3107/*
3108 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3109 * the current iclog pointer to the next iclog in the ring.
3110 */
3111void
3112xlog_state_switch_iclogs(
3113	struct xlog		*log,
3114	struct xlog_in_core	*iclog,
3115	int			eventual_size)
3116{
3117	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3118	assert_spin_locked(&log->l_icloglock);
3119	trace_xlog_iclog_switch(iclog, _RET_IP_);
3120
3121	if (!eventual_size)
3122		eventual_size = iclog->ic_offset;
3123	iclog->ic_state = XLOG_STATE_WANT_SYNC;
3124	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3125	log->l_prev_block = log->l_curr_block;
3126	log->l_prev_cycle = log->l_curr_cycle;
3127
3128	/* roll log?: ic_offset changed later */
3129	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3130
3131	/* Round up to next log-sunit */
3132	if (log->l_iclog_roundoff > BBSIZE) {
3133		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3134		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3135	}
3136
3137	if (log->l_curr_block >= log->l_logBBsize) {
3138		/*
3139		 * Rewind the current block before the cycle is bumped to make
3140		 * sure that the combined LSN never transiently moves forward
3141		 * when the log wraps to the next cycle. This is to support the
3142		 * unlocked sample of these fields from xlog_valid_lsn(). Most
3143		 * other cases should acquire l_icloglock.
3144		 */
3145		log->l_curr_block -= log->l_logBBsize;
3146		ASSERT(log->l_curr_block >= 0);
3147		smp_wmb();
3148		log->l_curr_cycle++;
3149		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3150			log->l_curr_cycle++;
3151	}
3152	ASSERT(iclog == log->l_iclog);
3153	log->l_iclog = iclog->ic_next;
3154}
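
/*
 * Worked example (illustrative, with assumed values): switching away from
 * an iclog holding 60000 bytes of data with a 512 byte header advances
 * l_curr_block by BTOBB(60000) + BTOBB(512) = 118 + 1 blocks.  With a
 * 32 KiB stripe unit (l_iclog_roundoff), l_curr_block is then rounded up
 * to the next multiple of 64 basic blocks, and if it runs past
 * l_logBBsize it wraps to the front of the log and the cycle is bumped,
 * skipping the XLOG_HEADER_MAGIC_NUM cycle value.
 */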
3155
3156/*
3157 * Force the iclog to disk and check if the iclog has been completed before
3158 * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3159 * pmem) or fast async storage because we drop the icloglock to issue the IO.
3160 * If completion has already occurred, tell the caller so that it can avoid an
3161 * unnecessary wait on the iclog.
3162 */
3163static int
3164xlog_force_and_check_iclog(
3165	struct xlog_in_core	*iclog,
3166	bool			*completed)
3167{
3168	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3169	int			error;
3170
3171	*completed = false;
3172	error = xlog_force_iclog(iclog);
3173	if (error)
3174		return error;
3175
3176	/*
3177	 * If the iclog has already been completed and reused the header LSN
3178	 * will have been rewritten by completion
3179	 */
3180	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3181		*completed = true;
3182	return 0;
3183}
3184
3185/*
3186 * Write out all data in the in-core log as of this exact moment in time.
3187 *
3188 * Data may be written to the in-core log during this call.  However,
3189 * we don't guarantee this data will be written out.  A change from past
3190 * implementation means this routine will *not* write out zero length LRs.
3191 *
3192 * Basically, we try and perform an intelligent scan of the in-core logs.
3193 * If we determine there is no flushable data, we just return.  There is no
3194 * flushable data if:
3195 *
3196 *	1. the current iclog is active and has no data; the previous iclog
3197 *		is in the active or dirty state.
3198 *	2. the current iclog is dirty, and the previous iclog is in the
3199 *		active or dirty state.
3200 *
3201 * We may sleep if:
3202 *
3203 *	1. the current iclog is not in the active nor dirty state.
3204 *	2. the current iclog dirty, and the previous iclog is not in the
3205 *		active nor dirty state.
3206 *	3. the current iclog is active, and there is another thread writing
3207 *		to this particular iclog.
3208 *	4. a) the current iclog is active and has no other writers
3209 *	   b) when we return from flushing out this iclog, it is still
3210 *		not in the active nor dirty state.
3211 */
3212int
3213xfs_log_force(
3214	struct xfs_mount	*mp,
3215	uint			flags)
3216{
3217	struct xlog		*log = mp->m_log;
3218	struct xlog_in_core	*iclog;
3219
3220	XFS_STATS_INC(mp, xs_log_force);
3221	trace_xfs_log_force(mp, 0, _RET_IP_);
3222
3223	xlog_cil_force(log);
3224
3225	spin_lock(&log->l_icloglock);
3226	if (xlog_is_shutdown(log))
3227		goto out_error;
3228
3229	iclog = log->l_iclog;
3230	trace_xlog_iclog_force(iclog, _RET_IP_);
3231
3232	if (iclog->ic_state == XLOG_STATE_DIRTY ||
3233	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
3234	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3235		/*
3236		 * If the head is dirty or (active and empty), then we need to
3237		 * look at the previous iclog.
3238		 *
3239		 * If the previous iclog is active or dirty we are done.  There
3240		 * is nothing to sync out. Otherwise, we attach ourselves to the
3241		 * previous iclog and go to sleep.
3242		 */
3243		iclog = iclog->ic_prev;
3244	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3245		if (atomic_read(&iclog->ic_refcnt) == 0) {
3246			/* We have exclusive access to this iclog. */
3247			bool	completed;
3248
3249			if (xlog_force_and_check_iclog(iclog, &completed))
3250				goto out_error;
3251
3252			if (completed)
3253				goto out_unlock;
3254		} else {
3255			/*
3256			 * Someone else is still writing to this iclog, so we
3257			 * need to ensure that when they release the iclog it
3258			 * gets synced immediately as we may be waiting on it.
3259			 */
3260			xlog_state_switch_iclogs(log, iclog, 0);
3261		}
3262	}
3263
3264	/*
3265	 * The iclog we are about to wait on may contain the checkpoint pushed
3266	 * by the above xlog_cil_force() call, but it may not have been pushed
3267	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
3268	 * are flushed when this iclog is written.
3269	 */
3270	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3271		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3272
3273	if (flags & XFS_LOG_SYNC)
3274		return xlog_wait_on_iclog(iclog);
3275out_unlock:
3276	spin_unlock(&log->l_icloglock);
3277	return 0;
3278out_error:
3279	spin_unlock(&log->l_icloglock);
3280	return -EIO;
3281}
3282
3283/*
3284 * Force the log to a specific LSN.
3285 *
3286 * If an iclog with that lsn can be found:
3287 *	If it is in the DIRTY state, just return.
3288 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3289 *		state and go to sleep or return.
3290 *	If it is in any other state, go to sleep or return.
3291 *
3292 * Synchronous forces are implemented with a wait queue.  All callers trying
3293 * to force a given lsn to disk must wait on the queue attached to the
3294 * specific in-core log.  When given in-core log finally completes its write
3295 * to disk, that thread will wake up all threads waiting on the queue.
3296 */
3297static int
3298xlog_force_lsn(
3299	struct xlog		*log,
3300	xfs_lsn_t		lsn,
3301	uint			flags,
3302	int			*log_flushed,
3303	bool			already_slept)
3304{
3305	struct xlog_in_core	*iclog;
3306	bool			completed;
3307
3308	spin_lock(&log->l_icloglock);
3309	if (xlog_is_shutdown(log))
3310		goto out_error;
3311
3312	iclog = log->l_iclog;
3313	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3314		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3315		iclog = iclog->ic_next;
3316		if (iclog == log->l_iclog)
3317			goto out_unlock;
3318	}
3319
3320	switch (iclog->ic_state) {
3321	case XLOG_STATE_ACTIVE:
3322		/*
3323		 * We sleep here if we haven't already slept (e.g. this is the
3324		 * first time we've looked at the correct iclog buf) and the
3325		 * buffer before us is going to be sync'ed.  The reason for this
3326		 * is that if we are doing sync transactions here, by waiting
3327		 * for the previous I/O to complete, we can allow a few more
3328		 * transactions into this iclog before we close it down.
3329		 *
3330		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3331		 * refcnt so we can release the log (which drops the ref count).
3332		 * The state switch keeps new transaction commits from using
3333		 * this buffer.  When the current commits finish writing into
3334		 * the buffer, the refcount will drop to zero and the buffer
3335		 * will go out then.
3336		 */
3337		if (!already_slept &&
3338		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3339		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3340			xlog_wait(&iclog->ic_prev->ic_write_wait,
3341					&log->l_icloglock);
3342			return -EAGAIN;
3343		}
3344		if (xlog_force_and_check_iclog(iclog, &completed))
3345			goto out_error;
3346		if (log_flushed)
3347			*log_flushed = 1;
3348		if (completed)
3349			goto out_unlock;
3350		break;
3351	case XLOG_STATE_WANT_SYNC:
3352		/*
3353		 * This iclog may contain the checkpoint pushed by the
3354		 * xlog_cil_force_seq() call, but there are other writers still
3355		 * accessing it so it hasn't been pushed to disk yet. Like the
3356		 * ACTIVE case above, we need to make sure caches are flushed
3357		 * when this iclog is written.
3358		 */
3359		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3360		break;
3361	default:
3362		/*
3363		 * The entire checkpoint was written by the CIL force and is on
3364		 * its way to disk already. It will be stable when it
3365		 * completes, so we don't need to manipulate caches here at all.
3366		 * We just need to wait for completion if necessary.
3367		 */
3368		break;
3369	}
3370
3371	if (flags & XFS_LOG_SYNC)
3372		return xlog_wait_on_iclog(iclog);
3373out_unlock:
3374	spin_unlock(&log->l_icloglock);
3375	return 0;
3376out_error:
3377	spin_unlock(&log->l_icloglock);
3378	return -EIO;
3379}
3380
3381/*
3382 * Force the log to a specific checkpoint sequence.
3383 *
3384 * First force the CIL so that all the required changes have been flushed to the
3385 * iclogs. If the CIL force completed it will return a commit LSN that indicates
3386 * the iclog that needs to be flushed to stable storage. If the caller needs
3387 * a synchronous log force, we will wait on the iclog with the LSN returned by
3388 * xlog_cil_force_seq() to be completed.
3389 */
3390int
3391xfs_log_force_seq(
3392	struct xfs_mount	*mp,
3393	xfs_csn_t		seq,
3394	uint			flags,
3395	int			*log_flushed)
3396{
3397	struct xlog		*log = mp->m_log;
3398	xfs_lsn_t		lsn;
3399	int			ret;
3400	ASSERT(seq != 0);
3401
3402	XFS_STATS_INC(mp, xs_log_force);
3403	trace_xfs_log_force(mp, seq, _RET_IP_);
3404
3405	lsn = xlog_cil_force_seq(log, seq);
3406	if (lsn == NULLCOMMITLSN)
3407		return 0;
3408
3409	ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3410	if (ret == -EAGAIN) {
3411		XFS_STATS_INC(mp, xs_log_force_sleep);
3412		ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3413	}
3414	return ret;
3415}
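
/*
 * Usage sketch (illustrative): an fsync-style caller forces a specific
 * committed checkpoint sequence synchronously, e.g.
 *
 *	error = xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, NULL);
 *
 * passing a non-NULL log_flushed instead reports whether an iclog write
 * was actually issued by this force.
 */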
3416
3417/*
3418 * Free a used ticket when its refcount falls to zero.
3419 */
3420void
3421xfs_log_ticket_put(
3422	xlog_ticket_t	*ticket)
3423{
3424	ASSERT(atomic_read(&ticket->t_ref) > 0);
3425	if (atomic_dec_and_test(&ticket->t_ref))
3426		kmem_cache_free(xfs_log_ticket_cache, ticket);
3427}
3428
3429xlog_ticket_t *
3430xfs_log_ticket_get(
3431	xlog_ticket_t	*ticket)
3432{
3433	ASSERT(atomic_read(&ticket->t_ref) > 0);
3434	atomic_inc(&ticket->t_ref);
3435	return ticket;
3436}
3437
3438/*
3439 * Figure out the total log space unit (in bytes) that would be
3440 * required for a log ticket.
3441 */
3442static int
3443xlog_calc_unit_res(
3444	struct xlog		*log,
3445	int			unit_bytes,
3446	int			*niclogs)
3447{
3448	int			iclog_space;
3449	uint			num_headers;
3450
3451	/*
3452	 * Permanent reservations have up to 'cnt'-1 active log operations
3453	 * in the log.  A unit in this case is the amount of space for one
3454	 * of these log operations.  Normal reservations have a cnt of 1
3455	 * and their unit amount is the total amount of space required.
3456	 *
3457	 * The following lines of code account for non-transaction data
3458	 * which occupy space in the on-disk log.
3459	 *
3460	 * Normal form of a transaction is:
3461	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3462	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3463	 *
3464	 * We need to account for all the lead-up and trailer data around the
3465	 * transaction data, and then for the worst case of extra space usage
3466	 * on top of that.
3467	 *
3468	 * The worst case will happen if:
3469	 * - the placement of the transaction happens to be such that the
3470	 *   roundoff is at its maximum
3471	 * - the transaction data is synced before the commit record is synced
3472	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3473	 *   Therefore the commit record is in its own Log Record.
3474	 *   This can happen because the commit record is written in its
3475	 *   own call to xlog_write().
3476	 *   This then means that in the worst case, roundoff can happen for
3477	 *   the commit-rec as well.
3478	 *   The commit-rec is smaller than padding in this scenario and so it is
3479	 *   not added separately.
3480	 */
3481
3482	/* for trans header */
3483	unit_bytes += sizeof(xlog_op_header_t);
3484	unit_bytes += sizeof(xfs_trans_header_t);
3485
3486	/* for start-rec */
3487	unit_bytes += sizeof(xlog_op_header_t);
3488
3489	/*
3490	 * for LR headers - the space for data in an iclog is the size minus
3491	 * the space used for the headers. If we use the iclog size, then we
3492	 * undercalculate the number of headers required.
3493	 *
3494	 * Furthermore - the addition of op headers for split-recs might
3495	 * increase the space required enough to require more log and op
3496	 * headers, so take that into account too.
3497	 *
3498	 * IMPORTANT: This reservation makes the assumption that if this
3499	 * transaction is the first in an iclog and hence has the LR headers
3500	 * accounted to it, then the remaining space in the iclog is
3501	 * exclusively for this transaction.  i.e. if the transaction is larger
3502	 * than the iclog, it will be the only thing in that iclog.
3503	 * Fundamentally, this means we must pass the entire log vector to
3504	 * xlog_write to guarantee this.
3505	 */
3506	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3507	num_headers = howmany(unit_bytes, iclog_space);
3508
3509	/* for split-recs - ophdrs added when data split over LRs */
3510	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3511
3512	/* add extra header reservations if we overrun */
3513	while (!num_headers ||
3514	       howmany(unit_bytes, iclog_space) > num_headers) {
3515		unit_bytes += sizeof(xlog_op_header_t);
3516		num_headers++;
3517	}
3518	unit_bytes += log->l_iclog_hsize * num_headers;
3519
3520	/* for commit-rec LR header - note: padding will subsume the ophdr */
3521	unit_bytes += log->l_iclog_hsize;
3522
3523	/* roundoff padding for transaction data and one for commit record */
3524	unit_bytes += 2 * log->l_iclog_roundoff;
3525
3526	if (niclogs)
3527		*niclogs = num_headers;
3528	return unit_bytes;
3529}
3530
3531int
3532xfs_log_calc_unit_res(
3533	struct xfs_mount	*mp,
3534	int			unit_bytes)
3535{
3536	return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3537}
3538
3539/*
3540 * Allocate and initialise a new log ticket.
3541 */
3542struct xlog_ticket *
3543xlog_ticket_alloc(
3544	struct xlog		*log,
3545	int			unit_bytes,
3546	int			cnt,
3547	bool			permanent)
3548{
3549	struct xlog_ticket	*tic;
3550	int			unit_res;
3551
3552	tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3553
3554	unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3555
3556	atomic_set(&tic->t_ref, 1);
3557	tic->t_task		= current;
3558	INIT_LIST_HEAD(&tic->t_queue);
3559	tic->t_unit_res		= unit_res;
3560	tic->t_curr_res		= unit_res;
3561	tic->t_cnt		= cnt;
3562	tic->t_ocnt		= cnt;
3563	tic->t_tid		= get_random_u32();
3564	if (permanent)
3565		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3566
3567	return tic;
3568}
3569
3570#if defined(DEBUG)
3571/*
3572 * Check to make sure the grant write head didn't just overlap the tail.  If
3573 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3574 * the cycles differ by exactly one and check the byte count.
3575 *
3576 * This check is run unlocked, so can give false positives. Rather than assert
3577 * on failures, use a warn-once flag and a panic tag to allow the admin to
3578 * determine if they want to panic the machine when such an error occurs. For
3579 * debug kernels this will have the same effect as using an assert but, unlike
3580 * an assert, it can be turned off at runtime.
3581 */
3582STATIC void
3583xlog_verify_grant_tail(
3584	struct xlog	*log)
3585{
3586	int		tail_cycle, tail_blocks;
3587	int		cycle, space;
3588
3589	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3590	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3591	if (tail_cycle != cycle) {
3592		if (cycle - 1 != tail_cycle &&
3593		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3594			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3595				"%s: cycle - 1 != tail_cycle", __func__);
3596		}
3597
3598		if (space > BBTOB(tail_blocks) &&
3599		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3600			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3601				"%s: space > BBTOB(tail_blocks)", __func__);
3602		}
3603	}
3604}
3605
3606/* check if it will fit */
3607STATIC void
3608xlog_verify_tail_lsn(
3609	struct xlog		*log,
3610	struct xlog_in_core	*iclog)
3611{
3612	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3613	int		blocks;
3614
3615	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3616		blocks = log->l_logBBsize -
3617				(log->l_prev_block - BLOCK_LSN(tail_lsn));
3618		if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
3619			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3620	} else {
3621		ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);
3622
3623		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3624			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3625
3626		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3627		if (blocks < BTOBB(iclog->ic_offset) + 1)
3628			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3629	}
3630}
3631
3632/*
3633 * Perform a number of checks on the iclog before writing to disk.
3634 *
3635 * 1. Make sure the iclogs are still circular
3636 * 2. Make sure we have a good magic number
3637 * 3. Make sure we don't have magic numbers in the data
3638 * 4. Check fields of each log operation header for:
3639 *	A. Valid client identifier
3640 *	B. tid ptr value falls in valid ptr space (user space code)
3641 *	C. Length in log record header is correct according to the
3642 *		individual operation headers within record.
3643 * 5. When a bwrite will occur within 5 blocks of the front of the physical
3644 *	log, check the preceding blocks of the physical log to make sure all
3645 *	the cycle numbers agree with the current cycle number.
3646 */
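/*
 * Background for the block-offset checks below: when an iclog is synced,
 * the first 32 bits of every 512 byte block are overwritten with the
 * cycle number and the displaced bytes are saved in h_cycle_data (or in
 * the extended headers for iclogs larger than 32k). Any op header field
 * that lands exactly on a 512 byte boundary (field_offset & 0x1ff == 0)
 * must therefore be recovered from the saved cycle data rather than read
 * from the block itself.
 */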
3647STATIC void
3648xlog_verify_iclog(
3649	struct xlog		*log,
3650	struct xlog_in_core	*iclog,
3651	int			count)
3652{
3653	xlog_op_header_t	*ophead;
3654	xlog_in_core_t		*icptr;
3655	xlog_in_core_2_t	*xhdr;
3656	void			*base_ptr, *ptr, *p;
3657	ptrdiff_t		field_offset;
3658	uint8_t			clientid;
3659	int			len, i, j, k, op_len;
3660	int			idx;
3661
3662	/* check validity of iclog pointers */
3663	spin_lock(&log->l_icloglock);
3664	icptr = log->l_iclog;
3665	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3666		ASSERT(icptr);
3667
3668	if (icptr != log->l_iclog)
3669		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3670	spin_unlock(&log->l_icloglock);
3671
3672	/* check log magic numbers */
3673	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3674		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3675
3676	base_ptr = ptr = &iclog->ic_header;
3677	p = &iclog->ic_header;
3678	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3679		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3680			xfs_emerg(log->l_mp, "%s: unexpected magic num",
3681				__func__);
3682	}
3683
3684	/* check fields */
3685	len = be32_to_cpu(iclog->ic_header.h_num_logops);
3686	base_ptr = ptr = iclog->ic_datap;
3687	ophead = ptr;
3688	xhdr = iclog->ic_data;
3689	for (i = 0; i < len; i++) {
3690		ophead = ptr;
3691
3692		/* clientid is only 1 byte */
3693		p = &ophead->oh_clientid;
3694		field_offset = p - base_ptr;
3695		if (field_offset & 0x1ff) {
3696			clientid = ophead->oh_clientid;
3697		} else {
3698			idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3699			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3700				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3701				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3702				clientid = xlog_get_client_id(
3703					xhdr[j].hic_xheader.xh_cycle_data[k]);
3704			} else {
3705				clientid = xlog_get_client_id(
3706					iclog->ic_header.h_cycle_data[idx]);
3707			}
3708		}
3709		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3710			xfs_warn(log->l_mp,
3711				"%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3712				__func__, i, clientid, ophead,
3713				(unsigned long)field_offset);
3714		}
3715
3716		/* check length */
3717		p = &ophead->oh_len;
3718		field_offset = p - base_ptr;
3719		if (field_offset & 0x1ff) {
3720			op_len = be32_to_cpu(ophead->oh_len);
3721		} else {
3722			idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3723			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3724				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3725				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3726				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3727			} else {
3728				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3729			}
3730		}
3731		ptr += sizeof(xlog_op_header_t) + op_len;
3732	}
3733}
3734#endif
3735
3736/*
3737 * Perform a forced shutdown on the log.
3738 *
3739 * This can be called from low level log code to trigger a shutdown, or from the
3740 * high level mount shutdown code when the mount shuts down.
3741 *
3742 * Our main objectives here are to make sure that:
3743 *	a. if the shutdown was not due to a log IO error, flush the logs to
3744 *	   disk. Anything modified after this is ignored.
3745 *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3746 *	   parties to find out. Nothing new gets queued after this is done.
3747 *	c. Tasks sleeping on log reservations, pinned objects and
3748 *	   other resources get woken up.
3749 *	d. The mount is also marked as shut down so that log triggered shutdowns
3750 *	   still behave the same as if they called xfs_forced_shutdown().
3751 *
3752 * Return true if the shutdown cause was a log IO error and we actually shut the
3753 * log down.
3754 */
3755bool
3756xlog_force_shutdown(
3757	struct xlog	*log,
3758	uint32_t	shutdown_flags)
3759{
3760	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3761
3762	if (!log)
3763		return false;
3764
3765	/*
3766	 * Flush all the completed transactions to disk before marking the log
3767	 * being shut down. We need to do this first as shutting down the log
3768	 * before the force will prevent the log force from flushing the iclogs
3769	 * to disk.
3770	 *
3771	 * When we are in recovery, there are no transactions to flush, and
3772	 * we don't want to touch the log because we don't want to perturb the
3773	 * current head/tail for future recovery attempts. Hence we need to
3774	 * avoid a log force in this case.
3775	 *
3776	 * If we are shutting down due to a log IO error, then we must avoid
3777	 * trying to write the log as that may just result in more IO errors and
3778	 * an endless shutdown/force loop.
3779	 */
3780	if (!log_error && !xlog_in_recovery(log))
3781		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3782
3783	/*
3784	 * Atomically set the shutdown state. If the shutdown state is already
3785	 * set, then someone else is performing the shutdown and so we are done
3786	 * here. This should never happen because we should only ever get called
3787	 * once by the first shutdown caller.
3788	 *
3789	 * Much of the log state machine transitions assume that shutdown state
3790	 * cannot change once they hold the log->l_icloglock. Hence we need to
3791	 * hold that lock here, even though we use the atomic test_and_set_bit()
3792	 * operation to set the shutdown state.
3793	 */
3794	spin_lock(&log->l_icloglock);
3795	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3796		spin_unlock(&log->l_icloglock);
3797		return false;
3798	}
3799	spin_unlock(&log->l_icloglock);
3800
3801	/*
3802	 * If this log shutdown also sets the mount shutdown state, issue a
3803	 * shutdown warning message.
3804	 */
3805	if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3806		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3807"Filesystem has been shut down due to log error (0x%x).",
3808				shutdown_flags);
3809		xfs_alert(log->l_mp,
3810"Please unmount the filesystem and rectify the problem(s).");
3811		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3812			xfs_stack_trace();
3813	}
3814
3815	/*
3816	 * We don't want anybody waiting for log reservations after this. That
3817	 * means we have to wake up everybody queued up on reserveq as well as
3818	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3819	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3820	 * action is protected by the grant locks.
3821	 */
3822	xlog_grant_head_wake_all(&log->l_reserve_head);
3823	xlog_grant_head_wake_all(&log->l_write_head);
3824
3825	/*
3826	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3827	 * as if the log writes were completed. The abort handling in the log
3828	 * item committed callback functions will do this again under lock to
3829	 * avoid races.
3830	 */
3831	spin_lock(&log->l_cilp->xc_push_lock);
3832	wake_up_all(&log->l_cilp->xc_start_wait);
3833	wake_up_all(&log->l_cilp->xc_commit_wait);
3834	spin_unlock(&log->l_cilp->xc_push_lock);
3835
3836	spin_lock(&log->l_icloglock);
3837	xlog_state_shutdown_callbacks(log);
3838	spin_unlock(&log->l_icloglock);
3839
3840	wake_up_var(&log->l_opstate);
3841	return log_error;
3842}
3843
3844STATIC int
3845xlog_iclogs_empty(
3846	struct xlog	*log)
3847{
3848	xlog_in_core_t	*iclog;
3849
3850	iclog = log->l_iclog;
3851	do {
3852		/*
3853		 * Endianness does not matter here; zero is zero in any language.
3854		 */
3855		if (iclog->ic_header.h_num_logops)
3856			return 0;
3857		iclog = iclog->ic_next;
3858	} while (iclog != log->l_iclog);
3859	return 1;
3860}
3861
3862/*
3863 * Verify that an LSN stamped into a piece of metadata is valid. This is
3864 * intended for use in read verifiers on v5 superblocks.
3865 */
3866bool
3867xfs_log_check_lsn(
3868	struct xfs_mount	*mp,
3869	xfs_lsn_t		lsn)
3870{
3871	struct xlog		*log = mp->m_log;
3872	bool			valid;
3873
3874	/*
3875	 * norecovery mode skips mount-time log processing and unconditionally
3876	 * resets the in-core LSN. We can't validate in this mode, but
3877	 * modifications are not allowed anyway, so just return true.
3878	 */
3879	if (xfs_has_norecovery(mp))
3880		return true;
3881
3882	/*
3883	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3884	 * handled by recovery and thus safe to ignore here.
3885	 */
3886	if (lsn == NULLCOMMITLSN)
3887		return true;
3888
3889	valid = xlog_valid_lsn(mp->m_log, lsn);
3890
3891	/* warn the user about what's gone wrong before verifier failure */
3892	if (!valid) {
3893		spin_lock(&log->l_icloglock);
3894		xfs_warn(mp,
3895"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3896"Please unmount and run xfs_repair (>= v4.3) to resolve.",
3897			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3898			 log->l_curr_cycle, log->l_curr_block);
3899		spin_unlock(&log->l_icloglock);
3900	}
3901
3902	return valid;
3903}
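/*
 * A hypothetical read-verifier sketch (hdr->lsn is an assumed ondisk
 * field, not from this file): v5 buffer verifiers typically feed the
 * stamped LSN through this check and fail verification on a bad value:
 *
 *	if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr->lsn)))
 *		return __this_address;
 */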
3904
3905/*
3906 * Notify the log that we're about to start using a feature that is protected
3907 * by a log incompat feature flag.  This will prevent log covering from
3908 * clearing those flags.
3909 */
3910void
3911xlog_use_incompat_feat(
3912	struct xlog		*log)
3913{
3914	down_read(&log->l_incompat_users);
3915}
3916
3917/* Notify the log that we've finished using log incompat features. */
3918void
3919xlog_drop_incompat_feat(
3920	struct xlog		*log)
3921{
3922	up_read(&log->l_incompat_users);
3923}
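/*
 * A minimal usage sketch for the pair above (do_feature_update is an
 * assumed helper, not from this file): hold the shared lock across any
 * operation that relies on a log incompat flag so that log covering
 * cannot clear the flag while it is in use.
 *
 *	xlog_use_incompat_feat(log);
 *	error = do_feature_update(mp);
 *	xlog_drop_incompat_feat(log);
 */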
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_errortag.h"
  14#include "xfs_error.h"
  15#include "xfs_trans.h"
  16#include "xfs_trans_priv.h"
  17#include "xfs_log.h"
  18#include "xfs_log_priv.h"
  19#include "xfs_trace.h"
  20#include "xfs_sysfs.h"
  21#include "xfs_sb.h"
  22#include "xfs_health.h"
  23
  24kmem_zone_t	*xfs_log_ticket_zone;
  25
  26/* Local miscellaneous function prototypes */
  27STATIC int
  28xlog_commit_record(
  29	struct xlog		*log,
  30	struct xlog_ticket	*ticket,
  31	struct xlog_in_core	**iclog,
  32	xfs_lsn_t		*commitlsnp);
  33
  34STATIC struct xlog *
  35xlog_alloc_log(
  36	struct xfs_mount	*mp,
  37	struct xfs_buftarg	*log_target,
  38	xfs_daddr_t		blk_offset,
  39	int			num_bblks);
  40STATIC int
  41xlog_space_left(
  42	struct xlog		*log,
  43	atomic64_t		*head);
  44STATIC void
  45xlog_dealloc_log(
  46	struct xlog		*log);
  47
  48/* local state machine functions */
  49STATIC void xlog_state_done_syncing(
  50	struct xlog_in_core	*iclog,
  51	bool			aborted);
 
  52STATIC int
  53xlog_state_get_iclog_space(
  54	struct xlog		*log,
  55	int			len,
  56	struct xlog_in_core	**iclog,
  57	struct xlog_ticket	*ticket,
  58	int			*continued_write,
  59	int			*logoffsetp);
  60STATIC int
  61xlog_state_release_iclog(
  62	struct xlog		*log,
  63	struct xlog_in_core	*iclog);
  64STATIC void
  65xlog_state_switch_iclogs(
  66	struct xlog		*log,
  67	struct xlog_in_core	*iclog,
  68	int			eventual_size);
  69STATIC void
  70xlog_state_want_sync(
  71	struct xlog		*log,
  72	struct xlog_in_core	*iclog);
  73
  74STATIC void
  75xlog_grant_push_ail(
  76	struct xlog		*log,
  77	int			need_bytes);
  78STATIC void
  79xlog_regrant_reserve_log_space(
  80	struct xlog		*log,
  81	struct xlog_ticket	*ticket);
  82STATIC void
  83xlog_ungrant_log_space(
  84	struct xlog		*log,
 
  85	struct xlog_ticket	*ticket);
  86
  87#if defined(DEBUG)
  88STATIC void
  89xlog_verify_dest_ptr(
  90	struct xlog		*log,
  91	void			*ptr);
  92STATIC void
  93xlog_verify_grant_tail(
  94	struct xlog *log);
  95STATIC void
  96xlog_verify_iclog(
  97	struct xlog		*log,
  98	struct xlog_in_core	*iclog,
  99	int			count);
 100STATIC void
 101xlog_verify_tail_lsn(
 102	struct xlog		*log,
 103	struct xlog_in_core	*iclog,
 104	xfs_lsn_t		tail_lsn);
 105#else
 106#define xlog_verify_dest_ptr(a,b)
 107#define xlog_verify_grant_tail(a)
 108#define xlog_verify_iclog(a,b,c)
 109#define xlog_verify_tail_lsn(a,b,c)
 110#endif
 111
 112STATIC int
 113xlog_iclogs_empty(
 114	struct xlog		*log);
 115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116static void
 117xlog_grant_sub_space(
 118	struct xlog		*log,
 119	atomic64_t		*head,
 120	int			bytes)
 121{
 122	int64_t	head_val = atomic64_read(head);
 123	int64_t new, old;
 124
 125	do {
 126		int	cycle, space;
 127
 128		xlog_crack_grant_head_val(head_val, &cycle, &space);
 129
 130		space -= bytes;
 131		if (space < 0) {
 132			space += log->l_logsize;
 133			cycle--;
 134		}
 135
 136		old = head_val;
 137		new = xlog_assign_grant_head_val(cycle, space);
 138		head_val = atomic64_cmpxchg(head, old, new);
 139	} while (head_val != old);
 140}
 141
 142static void
 143xlog_grant_add_space(
 144	struct xlog		*log,
 145	atomic64_t		*head,
 146	int			bytes)
 147{
 148	int64_t	head_val = atomic64_read(head);
 149	int64_t new, old;
 150
 151	do {
 152		int		tmp;
 153		int		cycle, space;
 154
 155		xlog_crack_grant_head_val(head_val, &cycle, &space);
 156
 157		tmp = log->l_logsize - space;
 158		if (tmp > bytes)
 159			space += bytes;
 160		else {
 161			space = bytes - tmp;
 162			cycle++;
 163		}
 164
 165		old = head_val;
 166		new = xlog_assign_grant_head_val(cycle, space);
 167		head_val = atomic64_cmpxchg(head, old, new);
 168	} while (head_val != old);
 169}
 170
 171STATIC void
 172xlog_grant_head_init(
 173	struct xlog_grant_head	*head)
 174{
 175	xlog_assign_grant_head(&head->grant, 1, 0);
 176	INIT_LIST_HEAD(&head->waiters);
 177	spin_lock_init(&head->lock);
 178}
 179
 180STATIC void
 181xlog_grant_head_wake_all(
 182	struct xlog_grant_head	*head)
 183{
 184	struct xlog_ticket	*tic;
 185
 186	spin_lock(&head->lock);
 187	list_for_each_entry(tic, &head->waiters, t_queue)
 188		wake_up_process(tic->t_task);
 189	spin_unlock(&head->lock);
 190}
 191
 192static inline int
 193xlog_ticket_reservation(
 194	struct xlog		*log,
 195	struct xlog_grant_head	*head,
 196	struct xlog_ticket	*tic)
 197{
 198	if (head == &log->l_write_head) {
 199		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 200		return tic->t_unit_res;
 201	} else {
 202		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 203			return tic->t_unit_res * tic->t_cnt;
 204		else
 205			return tic->t_unit_res;
 206	}
 
 
 
 
 
 207}
 208
 209STATIC bool
 210xlog_grant_head_wake(
 211	struct xlog		*log,
 212	struct xlog_grant_head	*head,
 213	int			*free_bytes)
 214{
 215	struct xlog_ticket	*tic;
 216	int			need_bytes;
 217	bool			woken_task = false;
 218
 219	list_for_each_entry(tic, &head->waiters, t_queue) {
 220
 221		/*
 222		 * There is a chance that the size of the CIL checkpoints in
 223		 * progress at the last AIL push target calculation resulted in
 224		 * limiting the target to the log head (l_last_sync_lsn) at the
 225		 * time. This may not reflect where the log head is now as the
 226		 * CIL checkpoints may have completed.
 227		 *
 228		 * Hence when we are woken here, it may be that the head of the
 229		 * log that has moved rather than the tail. As the tail didn't
 230		 * move, there still won't be space available for the
 231		 * reservation we require.  However, if the AIL has already
 232		 * pushed to the target defined by the old log head location, we
 233		 * will hang here waiting for something else to update the AIL
 234		 * push target.
 235		 *
 236		 * Therefore, if there isn't space to wake the first waiter on
 237		 * the grant head, we need to push the AIL again to ensure the
 238		 * target reflects both the current log tail and log head
 239		 * position before we wait for the tail to move again.
 240		 */
 241
 242		need_bytes = xlog_ticket_reservation(log, head, tic);
 243		if (*free_bytes < need_bytes) {
 244			if (!woken_task)
 245				xlog_grant_push_ail(log, need_bytes);
 246			return false;
 247		}
 248
 249		*free_bytes -= need_bytes;
 250		trace_xfs_log_grant_wake_up(log, tic);
 251		wake_up_process(tic->t_task);
 252		woken_task = true;
 253	}
 254
 255	return true;
 256}
 257
 258STATIC int
 259xlog_grant_head_wait(
 260	struct xlog		*log,
 261	struct xlog_grant_head	*head,
 262	struct xlog_ticket	*tic,
 263	int			need_bytes) __releases(&head->lock)
 264					    __acquires(&head->lock)
 265{
 266	list_add_tail(&tic->t_queue, &head->waiters);
 267
 268	do {
 269		if (XLOG_FORCED_SHUTDOWN(log))
 270			goto shutdown;
 271		xlog_grant_push_ail(log, need_bytes);
 272
 273		__set_current_state(TASK_UNINTERRUPTIBLE);
 274		spin_unlock(&head->lock);
 275
 276		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
 277
 278		trace_xfs_log_grant_sleep(log, tic);
 279		schedule();
 280		trace_xfs_log_grant_wake(log, tic);
 281
 282		spin_lock(&head->lock);
 283		if (XLOG_FORCED_SHUTDOWN(log))
 284			goto shutdown;
 285	} while (xlog_space_left(log, &head->grant) < need_bytes);
 286
 287	list_del_init(&tic->t_queue);
 288	return 0;
 289shutdown:
 290	list_del_init(&tic->t_queue);
 291	return -EIO;
 292}
 293
 294/*
 295 * Atomically get the log space required for a log ticket.
 296 *
 297 * Once a ticket gets put onto head->waiters, it will only return after the
 298 * needed reservation is satisfied.
 299 *
 300 * This function is structured so that it has a lock free fast path. This is
 301 * necessary because every new transaction reservation will come through this
 302 * path. Hence any lock will be globally hot if we take it unconditionally on
 303 * every pass.
 304 *
 305 * As tickets are only ever moved on and off head->waiters under head->lock, we
 306 * only need to take that lock if we are going to add the ticket to the queue
 307 * and sleep. We can avoid taking the lock if the ticket was never added to
 308 * head->waiters because the t_queue list head will be empty and we hold the
 309 * only reference to it so it can safely be checked unlocked.
 310 */
 311STATIC int
 312xlog_grant_head_check(
 313	struct xlog		*log,
 314	struct xlog_grant_head	*head,
 315	struct xlog_ticket	*tic,
 316	int			*need_bytes)
 317{
 318	int			free_bytes;
 319	int			error = 0;
 320
 321	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 322
 323	/*
 324	 * If there are other waiters on the queue then give them a chance at
 325	 * logspace before us.  Wake up the first waiters, if we do not wake
 326	 * up all the waiters then go to sleep waiting for more free space,
 327	 * otherwise try to get some space for this transaction.
 328	 */
 329	*need_bytes = xlog_ticket_reservation(log, head, tic);
 330	free_bytes = xlog_space_left(log, &head->grant);
 331	if (!list_empty_careful(&head->waiters)) {
 332		spin_lock(&head->lock);
 333		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
 334		    free_bytes < *need_bytes) {
 335			error = xlog_grant_head_wait(log, head, tic,
 336						     *need_bytes);
 337		}
 338		spin_unlock(&head->lock);
 339	} else if (free_bytes < *need_bytes) {
 340		spin_lock(&head->lock);
 341		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
 342		spin_unlock(&head->lock);
 343	}
 344
 345	return error;
 346}
 347
 348static void
 349xlog_tic_reset_res(xlog_ticket_t *tic)
 
 350{
 351	tic->t_res_num = 0;
 352	tic->t_res_arr_sum = 0;
 353	tic->t_res_num_ophdrs = 0;
 354}
 355
 356static void
 357xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
 358{
 359	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
 360		/* add to overflow and start again */
 361		tic->t_res_o_flow += tic->t_res_arr_sum;
 362		tic->t_res_num = 0;
 363		tic->t_res_arr_sum = 0;
 364	}
 365
 366	tic->t_res_arr[tic->t_res_num].r_len = len;
 367	tic->t_res_arr[tic->t_res_num].r_type = type;
 368	tic->t_res_arr_sum += len;
 369	tic->t_res_num++;
 370}
 371
 372/*
 373 * Replenish the byte reservation required by moving the grant write head.
 374 */
 375int
 376xfs_log_regrant(
 377	struct xfs_mount	*mp,
 378	struct xlog_ticket	*tic)
 379{
 380	struct xlog		*log = mp->m_log;
 381	int			need_bytes;
 382	int			error = 0;
 383
 384	if (XLOG_FORCED_SHUTDOWN(log))
 385		return -EIO;
 386
 387	XFS_STATS_INC(mp, xs_try_logspace);
 388
 389	/*
 390	 * This is a new transaction on the ticket, so we need to change the
 391	 * transaction ID so that the next transaction has a different TID in
 392	 * the log. Just add one to the existing tid so that we can see chains
 393	 * of rolling transactions in the log easily.
 394	 */
 395	tic->t_tid++;
 396
 397	xlog_grant_push_ail(log, tic->t_unit_res);
 398
 399	tic->t_curr_res = tic->t_unit_res;
 400	xlog_tic_reset_res(tic);
 401
 402	if (tic->t_cnt > 0)
 403		return 0;
 404
 405	trace_xfs_log_regrant(log, tic);
 406
 407	error = xlog_grant_head_check(log, &log->l_write_head, tic,
 408				      &need_bytes);
 409	if (error)
 410		goto out_error;
 411
 412	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 413	trace_xfs_log_regrant_exit(log, tic);
 414	xlog_verify_grant_tail(log);
 415	return 0;
 416
 417out_error:
 418	/*
 419	 * If we are failing, make sure the ticket doesn't have any current
 420	 * reservations.  We don't want to add this back when the ticket/
 421	 * transaction gets cancelled.
 422	 */
 423	tic->t_curr_res = 0;
 424	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 425	return error;
 426}
 427
 428/*
 429 * Reserve log space and return a ticket corresponding to the reservation.
 430 *
 431 * Each reservation is going to reserve extra space for a log record header.
 432 * When writes happen to the on-disk log, we don't subtract the length of the
 433 * log record header from any reservation.  By wasting space in each
 434 * reservation, we prevent over allocation problems.
 435 */
 436int
 437xfs_log_reserve(
 438	struct xfs_mount	*mp,
 439	int		 	unit_bytes,
 440	int		 	cnt,
 441	struct xlog_ticket	**ticp,
 442	uint8_t		 	client,
 443	bool			permanent)
 444{
 445	struct xlog		*log = mp->m_log;
 446	struct xlog_ticket	*tic;
 447	int			need_bytes;
 448	int			error = 0;
 449
 450	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
 451
 452	if (XLOG_FORCED_SHUTDOWN(log))
 453		return -EIO;
 454
 455	XFS_STATS_INC(mp, xs_try_logspace);
 456
 457	ASSERT(*ticp == NULL);
 458	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0);
 459	*ticp = tic;
 460
 461	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
 462					    : tic->t_unit_res);
 463
 464	trace_xfs_log_reserve(log, tic);
 465
 466	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
 467				      &need_bytes);
 468	if (error)
 469		goto out_error;
 470
 471	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
 472	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 473	trace_xfs_log_reserve_exit(log, tic);
 474	xlog_verify_grant_tail(log);
 475	return 0;
 476
 477out_error:
 478	/*
 479	 * If we are failing, make sure the ticket doesn't have any current
 480	 * reservations.  We don't want to add this back when the ticket/
 481	 * transaction gets cancelled.
 482	 */
 483	tic->t_curr_res = 0;
 484	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 485	return error;
 486}
 487
 488
 489/*
 490 * NOTES:
 491 *
 492 *	1. currblock field gets updated at startup and after in-core logs
 493 *		marked as with WANT_SYNC.
 
 
 
 
 
 
 
 
 494 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 495
 496/*
 497 * This routine is called when a user of a log manager ticket is done with
 498 * the reservation.  If the ticket was ever used, then a commit record for
 499 * the associated transaction is written out as a log operation header with
 500 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 501 * a given ticket.  If the ticket was one with a permanent reservation, then
 502 * a few operations are done differently.  Permanent reservation tickets by
 503 * default don't release the reservation.  They just commit the current
 504 * transaction with the belief that the reservation is still needed.  A flag
 505 * must be passed in before permanent reservations are actually released.
 506 * When these type of tickets are not released, they need to be set into
 507 * the inited state again.  By doing this, a start record will be written
 508 * out when the next write occurs.
 
 
 
 
 
 
 509 */
 510xfs_lsn_t
 511xfs_log_done(
 512	struct xfs_mount	*mp,
 513	struct xlog_ticket	*ticket,
 514	struct xlog_in_core	**iclog,
 515	bool			regrant)
 516{
 517	struct xlog		*log = mp->m_log;
 518	xfs_lsn_t		lsn = 0;
 
 
 519
 520	if (XLOG_FORCED_SHUTDOWN(log) ||
 521	    /*
 522	     * If nothing was ever written, don't write out commit record.
 523	     * If we get an error, just continue and give back the log ticket.
 524	     */
 525	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
 526	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
 527		lsn = (xfs_lsn_t) -1;
 528		regrant = false;
 
 
 
 529	}
 530
 
 531
 532	if (!regrant) {
 533		trace_xfs_log_done_nonperm(log, ticket);
 534
 535		/*
 536		 * Release ticket if not permanent reservation or a specific
 537		 * request has been made to release a permanent reservation.
 
 538		 */
 539		xlog_ungrant_log_space(log, ticket);
 540	} else {
 541		trace_xfs_log_done_perm(log, ticket);
 542
 543		xlog_regrant_reserve_log_space(log, ticket);
 544		/* If this ticket was a permanent reservation and we aren't
 545		 * trying to release it, reset the inited flags; so next time
 546		 * we write, a start record will be written out.
 547		 */
 548		ticket->t_flags |= XLOG_TIC_INITED;
 549	}
 550
 551	xfs_log_ticket_put(ticket);
 552	return lsn;
 553}
 554
 555int
 556xfs_log_release_iclog(
 557	struct xfs_mount	*mp,
 558	struct xlog_in_core	*iclog)
 559{
 560	if (xlog_state_release_iclog(mp->m_log, iclog)) {
 561		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 562		return -EIO;
 563	}
 564
 
 
 
 
 
 
 
 565	return 0;
 566}
 567
 568/*
 569 * Mount a log filesystem
 570 *
 571 * mp		- ubiquitous xfs mount point structure
 572 * log_target	- buftarg of on-disk log device
 573 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 574 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 575 *
 576 * Return error or zero.
 577 */
 578int
 579xfs_log_mount(
 580	xfs_mount_t	*mp,
 581	xfs_buftarg_t	*log_target,
 582	xfs_daddr_t	blk_offset,
 583	int		num_bblks)
 584{
 585	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
 
 586	int		error = 0;
 587	int		min_logfsbs;
 588
 589	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 590		xfs_notice(mp, "Mounting V%d Filesystem",
 591			   XFS_SB_VERSION_NUM(&mp->m_sb));
 
 592	} else {
 593		xfs_notice(mp,
 594"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
 595			   XFS_SB_VERSION_NUM(&mp->m_sb));
 596		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 
 597	}
 598
 599	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
 600	if (IS_ERR(mp->m_log)) {
 601		error = PTR_ERR(mp->m_log);
 602		goto out;
 603	}
 
 604
 605	/*
 606	 * Validate the given log space and drop a critical message via syslog
 607	 * if the log size is too small that would lead to some unexpected
 608	 * situations in transaction log space reservation stage.
 609	 *
 610	 * Note: we can't just reject the mount if the validation fails.  This
 611	 * would mean that people would have to downgrade their kernel just to
 612	 * remedy the situation as there is no way to grow the log (short of
 613	 * black magic surgery with xfs_db).
 614	 *
 615	 * We can, however, reject mounts for CRC format filesystems, as the
 616	 * mkfs binary being used to make the filesystem should never create a
 617	 * filesystem with a log that is too small.
 618	 */
 619	min_logfsbs = xfs_log_calc_minimum_size(mp);
 620
 621	if (mp->m_sb.sb_logblocks < min_logfsbs) {
 622		xfs_warn(mp,
 623		"Log size %d blocks too small, minimum size is %d blocks",
 624			 mp->m_sb.sb_logblocks, min_logfsbs);
 625		error = -EINVAL;
 626	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
 627		xfs_warn(mp,
 628		"Log size %d blocks too large, maximum size is %lld blocks",
 629			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
 630		error = -EINVAL;
 631	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
 632		xfs_warn(mp,
 633		"log size %lld bytes too large, maximum size is %lld bytes",
 634			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
 635			 XFS_MAX_LOG_BYTES);
 636		error = -EINVAL;
 637	} else if (mp->m_sb.sb_logsunit > 1 &&
 638		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
 639		xfs_warn(mp,
 640		"log stripe unit %u bytes must be a multiple of block size",
 641			 mp->m_sb.sb_logsunit);
 642		error = -EINVAL;
 643		fatal = true;
 644	}
 645	if (error) {
 646		/*
 647		 * Log check errors are always fatal on v5; or whenever bad
 648		 * metadata leads to a crash.
 649		 */
 650		if (fatal) {
 651			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
 652			ASSERT(0);
 653			goto out_free_log;
 654		}
 655		xfs_crit(mp, "Log size out of supported range.");
 656		xfs_crit(mp,
 657"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
 658	}
 659
 660	/*
 661	 * Initialize the AIL now we have a log.
 662	 */
 663	error = xfs_trans_ail_init(mp);
 664	if (error) {
 665		xfs_warn(mp, "AIL initialisation failed: error %d", error);
 666		goto out_free_log;
 667	}
 668	mp->m_log->l_ailp = mp->m_ail;
 669
 670	/*
 671	 * skip log recovery on a norecovery mount.  pretend it all
 672	 * just worked.
 673	 */
 674	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 675		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 676
 677		if (readonly)
 678			mp->m_flags &= ~XFS_MOUNT_RDONLY;
 679
 680		error = xlog_recover(mp->m_log);
 681
 682		if (readonly)
 683			mp->m_flags |= XFS_MOUNT_RDONLY;
 684		if (error) {
 685			xfs_warn(mp, "log mount/recovery failed: error %d",
 686				error);
 687			xlog_recover_cancel(mp->m_log);
 688			goto out_destroy_ail;
 689		}
 690	}
 691
 692	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
 693			       "log");
 694	if (error)
 695		goto out_destroy_ail;
 696
 697	/* Normal transactions can now occur */
 698	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
 699
 700	/*
 701	 * Now the log has been fully initialised and we know were our
 702	 * space grant counters are, we can initialise the permanent ticket
 703	 * needed for delayed logging to work.
 704	 */
 705	xlog_cil_init_post_recovery(mp->m_log);
 706
 707	return 0;
 708
 709out_destroy_ail:
 710	xfs_trans_ail_destroy(mp);
 711out_free_log:
 712	xlog_dealloc_log(mp->m_log);
 713out:
 714	return error;
 715}
 716
 717/*
 718 * Finish the recovery of the file system.  This is separate from the
 719 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 720 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 721 * here.
 722 *
 723 * If we finish recovery successfully, start the background log work. If we are
 724 * not doing recovery, then we have a RO filesystem and we don't need to start
 725 * it.
 726 */
 727int
 728xfs_log_mount_finish(
 729	struct xfs_mount	*mp)
 730{
 731	int	error = 0;
 732	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 733	bool	recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;
 734
 735	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
 736		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 737		return 0;
 738	} else if (readonly) {
 739		/* Allow unlinked processing to proceed */
 740		mp->m_flags &= ~XFS_MOUNT_RDONLY;
 741	}
 742
 743	/*
 
 
 
 
 
 
 744	 * During the second phase of log recovery, we need iget and
 745	 * iput to behave like they do for an active filesystem.
 746	 * xfs_fs_drop_inode needs to be able to prevent the deletion
 747	 * of inodes before we're done replaying log items on those
 748	 * inodes.  Turn it off immediately after recovery finishes
 749	 * so that we don't leak the quota inodes if subsequent mount
 750	 * activities fail.
 751	 *
 752	 * We let all inodes involved in redo item processing end up on
 753	 * the LRU instead of being evicted immediately so that if we do
 754	 * something to an unlinked inode, the irele won't cause
 755	 * premature truncation and freeing of the inode, which results
 756	 * in log recovery failure.  We have to evict the unreferenced
 757	 * lru inodes after clearing SB_ACTIVE because we don't
 758	 * otherwise clean up the lru if there's a subsequent failure in
 759	 * xfs_mountfs, which leads to us leaking the inodes if nothing
 760	 * else (e.g. quotacheck) references the inodes before the
 761	 * mount failure occurs.
 762	 */
 763	mp->m_super->s_flags |= SB_ACTIVE;
 764	error = xlog_recover_finish(mp->m_log);
 765	if (!error)
 766		xfs_log_work_queue(mp);
 767	mp->m_super->s_flags &= ~SB_ACTIVE;
 768	evict_inodes(mp->m_super);
 769
 770	/*
 771	 * Drain the buffer LRU after log recovery. This is required for v4
 772	 * filesystems to avoid leaving around buffers with NULL verifier ops,
 773	 * but we do it unconditionally to make sure we're always in a clean
 774	 * cache state after mount.
 775	 *
 776	 * Don't push in the error case because the AIL may have pending intents
 777	 * that aren't removed until recovery is cancelled.
 778	 */
 779	if (!error && recovered) {
 780		xfs_log_force(mp, XFS_LOG_SYNC);
 781		xfs_ail_push_all_sync(mp->m_ail);
 
 
 
 
 
 
 782	}
 783	xfs_wait_buftarg(mp->m_ddev_targp);
 784
 
 785	if (readonly)
 786		mp->m_flags |= XFS_MOUNT_RDONLY;
 
 
 
 787
 788	return error;
 789}
 790
 791/*
 792 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 793 * the log.
 794 */
 795void
 796xfs_log_mount_cancel(
 797	struct xfs_mount	*mp)
 798{
 799	xlog_recover_cancel(mp->m_log);
 800	xfs_log_unmount(mp);
 801}
 802
 803/*
 804 * Final log writes as part of unmount.
 805 *
 806 * Mark the filesystem clean as unmount happens.  Note that during relocation
 807 * this routine needs to be executed as part of source-bag while the
 808 * deallocation must not be done until source-end.
 809 */
 
 
 
 
 
 
 
 
 
 
 810
 811/* Actually write the unmount record to disk. */
 
 
 
 812static void
 813xfs_log_write_unmount_record(
 814	struct xfs_mount	*mp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 815{
 816	/* the data section must be 32 bit size aligned */
 817	struct xfs_unmount_log_format magic = {
 818		.magic = XLOG_UNMOUNT_TYPE,
 
 
 
 
 
 
 
 
 
 819	};
 820	struct xfs_log_iovec reg = {
 821		.i_addr = &magic,
 822		.i_len = sizeof(magic),
 823		.i_type = XLOG_REG_TYPE_UNMOUNT,
 824	};
 825	struct xfs_log_vec vec = {
 826		.lv_niovecs = 1,
 827		.lv_iovecp = &reg,
 828	};
 829	struct xlog		*log = mp->m_log;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 830	struct xlog_in_core	*iclog;
 831	struct xlog_ticket	*tic = NULL;
 832	xfs_lsn_t		lsn;
 833	uint			flags = XLOG_UNMOUNT_TRANS;
 834	int			error;
 835
 836	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
 837	if (error)
 838		goto out_err;
 839
 840	/*
 841	 * If we think the summary counters are bad, clear the unmount header
 842	 * flag in the unmount record so that the summary counters will be
 843	 * recalculated during log recovery at next mount.  Refer to
 844	 * xlog_check_unmount_rec for more details.
 845	 */
 846	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
 847			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
 848		xfs_alert(mp, "%s: will fix summary counters at next mount",
 849				__func__);
 850		flags &= ~XLOG_UNMOUNT_TRANS;
 851	}
 852
 853	/* remove inited flag, and account for space used */
 854	tic->t_flags = 0;
 855	tic->t_curr_res -= sizeof(magic);
 856	error = xlog_write(log, &vec, tic, &lsn, NULL, flags);
 857	/*
 858	 * At this point, we're umounting anyway, so there's no point in
 859	 * transitioning log state to IOERROR. Just continue...
 860	 */
 861out_err:
 862	if (error)
 863		xfs_alert(mp, "%s: unmount record failed", __func__);
 864
 865	spin_lock(&log->l_icloglock);
 866	iclog = log->l_iclog;
 867	atomic_inc(&iclog->ic_refcnt);
 868	xlog_state_want_sync(log, iclog);
 869	spin_unlock(&log->l_icloglock);
 870	error = xlog_state_release_iclog(log, iclog);
 871
 872	spin_lock(&log->l_icloglock);
 873	switch (iclog->ic_state) {
 874	default:
 875		if (!XLOG_FORCED_SHUTDOWN(log)) {
 876			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 877			break;
 878		}
 879		/* fall through */
 880	case XLOG_STATE_ACTIVE:
 881	case XLOG_STATE_DIRTY:
 882		spin_unlock(&log->l_icloglock);
 883		break;
 884	}
 885
 886	if (tic) {
 887		trace_xfs_log_umount_write(log, tic);
 888		xlog_ungrant_log_space(log, tic);
 889		xfs_log_ticket_put(tic);
 890	}
 891}
 892
 
 
 
 
 
 
 
 
 
 
 
 
 893/*
 894 * Unmount record used to have a string "Unmount filesystem--" in the
 895 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 896 * We just write the magic number now since that particular field isn't
 897 * currently architecture converted and "Unmount" is a bit foo.
 898 * As far as I know, there weren't any dependencies on the old behaviour.
 899 */
 900
 901static int
 902xfs_log_unmount_write(xfs_mount_t *mp)
 903{
 904	struct xlog	 *log = mp->m_log;
 905	xlog_in_core_t	 *iclog;
 906#ifdef DEBUG
 907	xlog_in_core_t	 *first_iclog;
 908#endif
 909	int		 error;
 910
 911	/*
 912	 * Don't write out unmount record on norecovery mounts or ro devices.
 913	 * Or, if we are doing a forced umount (typically because of IO errors).
 914	 */
 915	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
 916	    xfs_readonly_buftarg(log->l_targ)) {
 917		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 918		return 0;
 919	}
 920
 921	error = xfs_log_force(mp, XFS_LOG_SYNC);
 922	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 923
 924#ifdef DEBUG
 925	first_iclog = iclog = log->l_iclog;
 926	do {
 927		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
 928			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
 929			ASSERT(iclog->ic_offset == 0);
 930		}
 931		iclog = iclog->ic_next;
 932	} while (iclog != first_iclog);
 933#endif
 934	if (! (XLOG_FORCED_SHUTDOWN(log))) {
 935		xfs_log_write_unmount_record(mp);
 936	} else {
 937		/*
 938		 * We're already in forced_shutdown mode, couldn't
 939		 * even attempt to write out the unmount transaction.
 940		 *
 941		 * Go through the motions of sync'ing and releasing
 942		 * the iclog, even though no I/O will actually happen,
 943		 * we need to wait for other log I/Os that may already
 944		 * be in progress.  Do this as a separate section of
 945		 * code so we'll know if we ever get stuck here that
 946		 * we're in this odd situation of trying to unmount
 947		 * a file system that went into forced_shutdown as
 948		 * the result of an unmount..
 949		 */
 950		spin_lock(&log->l_icloglock);
 951		iclog = log->l_iclog;
 952		atomic_inc(&iclog->ic_refcnt);
 953
 954		xlog_state_want_sync(log, iclog);
 955		spin_unlock(&log->l_icloglock);
 956		error =  xlog_state_release_iclog(log, iclog);
 957
 958		spin_lock(&log->l_icloglock);
 959
 960		if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
 961			|| iclog->ic_state == XLOG_STATE_DIRTY
 962			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 963
 964				xlog_wait(&iclog->ic_force_wait,
 965							&log->l_icloglock);
 966		} else {
 967			spin_unlock(&log->l_icloglock);
 968		}
 969	}
 970
 971	return error;
 972}	/* xfs_log_unmount_write */
 
 973
 974/*
 975 * Empty the log for unmount/freeze.
 976 *
 977 * To do this, we first need to shut down the background log work so it is not
 978 * trying to cover the log as we clean up. We then need to unpin all objects in
 979 * the log so we can then flush them out. Once they have completed their IO and
 980 * run the callbacks removing themselves from the AIL, we can write the unmount
 981 * record.
 982 */
 983void
 984xfs_log_quiesce(
 985	struct xfs_mount	*mp)
 986{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 987	cancel_delayed_work_sync(&mp->m_log->l_work);
 988	xfs_log_force(mp, XFS_LOG_SYNC);
 989
 990	/*
 991	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
 992	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
 993	 * xfs_buf_iowait() cannot be used because it was pushed with the
 994	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
 995	 * the IO to complete.
 996	 */
 997	xfs_ail_push_all_sync(mp->m_ail);
 998	xfs_wait_buftarg(mp->m_ddev_targp);
 999	xfs_buf_lock(mp->m_sb_bp);
1000	xfs_buf_unlock(mp->m_sb_bp);
1001
 
 
 
 
 
 
 
 
1002	xfs_log_unmount_write(mp);
1003}
1004
1005/*
1006 * Shut down and release the AIL and Log.
1007 *
1008 * During unmount, we need to ensure we flush all the dirty metadata objects
1009 * from the AIL so that the log is empty before we write the unmount record to
1010 * the log. Once this is done, we can tear down the AIL and the log.
1011 */
1012void
1013xfs_log_unmount(
1014	struct xfs_mount	*mp)
1015{
1016	xfs_log_quiesce(mp);
 
 
 
 
 
 
 
 
 
 
1017
1018	xfs_trans_ail_destroy(mp);
1019
1020	xfs_sysfs_del(&mp->m_log->l_kobj);
1021
1022	xlog_dealloc_log(mp->m_log);
1023}
1024
1025void
1026xfs_log_item_init(
1027	struct xfs_mount	*mp,
1028	struct xfs_log_item	*item,
1029	int			type,
1030	const struct xfs_item_ops *ops)
1031{
1032	item->li_mountp = mp;
1033	item->li_ailp = mp->m_ail;
1034	item->li_type = type;
1035	item->li_ops = ops;
1036	item->li_lv = NULL;
1037
1038	INIT_LIST_HEAD(&item->li_ail);
1039	INIT_LIST_HEAD(&item->li_cil);
1040	INIT_LIST_HEAD(&item->li_bio_list);
1041	INIT_LIST_HEAD(&item->li_trans);
1042}
1043
1044/*
1045 * Wake up processes waiting for log space after we have moved the log tail.
1046 */
1047void
1048xfs_log_space_wake(
1049	struct xfs_mount	*mp)
1050{
1051	struct xlog		*log = mp->m_log;
1052	int			free_bytes;
1053
1054	if (XLOG_FORCED_SHUTDOWN(log))
1055		return;
1056
1057	if (!list_empty_careful(&log->l_write_head.waiters)) {
1058		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
1059
1060		spin_lock(&log->l_write_head.lock);
1061		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1062		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1063		spin_unlock(&log->l_write_head.lock);
1064	}
1065
1066	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1067		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
1068
1069		spin_lock(&log->l_reserve_head.lock);
1070		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1071		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1072		spin_unlock(&log->l_reserve_head.lock);
1073	}
1074}
1075
1076/*
1077 * Determine if we have a transaction that has gone to disk that needs to be
1078 * covered. To begin the transition to the idle state firstly the log needs to
1079 * be idle. That means the CIL, the AIL and the iclogs needs to be empty before
1080 * we start attempting to cover the log.
1081 *
1082 * Only if we are then in a state where covering is needed, the caller is
1083 * informed that dummy transactions are required to move the log into the idle
1084 * state.
1085 *
1086 * If there are any items in the AIl or CIL, then we do not want to attempt to
1087 * cover the log as we may be in a situation where there isn't log space
1088 * available to run a dummy transaction and this can lead to deadlocks when the
1089 * tail of the log is pinned by an item that is modified in the CIL.  Hence
1090 * there's no point in running a dummy transaction at this point because we
1091 * can't start trying to idle the log until both the CIL and AIL are empty.
1092 */
1093static int
1094xfs_log_need_covered(xfs_mount_t *mp)
 
1095{
1096	struct xlog	*log = mp->m_log;
1097	int		needed = 0;
1098
1099	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
1100		return 0;
1101
1102	if (!xlog_cil_empty(log))
1103		return 0;
1104
1105	spin_lock(&log->l_icloglock);
1106	switch (log->l_covered_state) {
1107	case XLOG_STATE_COVER_DONE:
1108	case XLOG_STATE_COVER_DONE2:
1109	case XLOG_STATE_COVER_IDLE:
1110		break;
1111	case XLOG_STATE_COVER_NEED:
1112	case XLOG_STATE_COVER_NEED2:
1113		if (xfs_ail_min_lsn(log->l_ailp))
1114			break;
1115		if (!xlog_iclogs_empty(log))
1116			break;
1117
1118		needed = 1;
1119		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1120			log->l_covered_state = XLOG_STATE_COVER_DONE;
1121		else
1122			log->l_covered_state = XLOG_STATE_COVER_DONE2;
1123		break;
1124	default:
1125		needed = 1;
1126		break;
1127	}
1128	spin_unlock(&log->l_icloglock);
1129	return needed;
1130}
1131
1132/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1133 * We may be holding the log iclog lock upon entering this routine.
1134 */
1135xfs_lsn_t
1136xlog_assign_tail_lsn_locked(
1137	struct xfs_mount	*mp)
1138{
1139	struct xlog		*log = mp->m_log;
1140	struct xfs_log_item	*lip;
1141	xfs_lsn_t		tail_lsn;
1142
1143	assert_spin_locked(&mp->m_ail->ail_lock);
1144
1145	/*
1146	 * To make sure we always have a valid LSN for the log tail we keep
1147	 * track of the last LSN which was committed in log->l_last_sync_lsn,
1148	 * and use that when the AIL was empty.
1149	 */
1150	lip = xfs_ail_min(mp->m_ail);
1151	if (lip)
1152		tail_lsn = lip->li_lsn;
1153	else
1154		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1155	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1156	atomic64_set(&log->l_tail_lsn, tail_lsn);
1157	return tail_lsn;
1158}
1159
1160xfs_lsn_t
1161xlog_assign_tail_lsn(
1162	struct xfs_mount	*mp)
1163{
1164	xfs_lsn_t		tail_lsn;
1165
1166	spin_lock(&mp->m_ail->ail_lock);
1167	tail_lsn = xlog_assign_tail_lsn_locked(mp);
1168	spin_unlock(&mp->m_ail->ail_lock);
1169
1170	return tail_lsn;
1171}
1172
1173/*
1174 * Return the space in the log between the tail and the head.  The head
1175 * is passed in the cycle/bytes formal parms.  In the special case where
1176 * the reserve head has wrapped passed the tail, this calculation is no
1177 * longer valid.  In this case, just return 0 which means there is no space
1178 * in the log.  This works for all places where this function is called
1179 * with the reserve head.  Of course, if the write head were to ever
1180 * wrap the tail, we should blow up.  Rather than catch this case here,
1181 * we depend on other ASSERTions in other parts of the code.   XXXmiken
1182 *
1183 * This code also handles the case where the reservation head is behind
1184 * the tail.  The details of this case are described below, but the end
1185 * result is that we return the size of the log as the amount of space left.
 
 
 
1186 */
1187STATIC int
1188xlog_space_left(
1189	struct xlog	*log,
1190	atomic64_t	*head)
1191{
1192	int		free_bytes;
1193	int		tail_bytes;
1194	int		tail_cycle;
1195	int		head_cycle;
1196	int		head_bytes;
1197
1198	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1199	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1200	tail_bytes = BBTOB(tail_bytes);
1201	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1202		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
1203	else if (tail_cycle + 1 < head_cycle)
1204		return 0;
1205	else if (tail_cycle < head_cycle) {
 
 
 
 
 
1206		ASSERT(tail_cycle == (head_cycle - 1));
1207		free_bytes = tail_bytes - head_bytes;
1208	} else {
1209		/*
1210		 * The reservation head is behind the tail.
1211		 * In this case we just want to return the size of the
1212		 * log as the amount of space left.
1213		 */
1214		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1215		xfs_alert(log->l_mp,
1216			  "  tail_cycle = %d, tail_bytes = %d",
1217			  tail_cycle, tail_bytes);
1218		xfs_alert(log->l_mp,
1219			  "  GH   cycle = %d, GH   bytes = %d",
1220			  head_cycle, head_bytes);
1221		ASSERT(0);
1222		free_bytes = log->l_logsize;
1223	}
1224	return free_bytes;
 
 
 
 
 
 
 
 
 
 
 
1225}
1226
1227
1228static void
1229xlog_ioend_work(
1230	struct work_struct	*work)
1231{
1232	struct xlog_in_core     *iclog =
1233		container_of(work, struct xlog_in_core, ic_end_io_work);
1234	struct xlog		*log = iclog->ic_log;
1235	bool			aborted = false;
1236	int			error;
1237
1238	error = blk_status_to_errno(iclog->ic_bio.bi_status);
1239#ifdef DEBUG
1240	/* treat writes with injected CRC errors as failed */
1241	if (iclog->ic_fail_crc)
1242		error = -EIO;
1243#endif
1244
1245	/*
1246	 * Race to shutdown the filesystem if we see an error.
1247	 */
1248	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1249		xfs_alert(log->l_mp, "log I/O error %d", error);
1250		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
1251		/*
1252		 * This flag will be propagated to the trans-committed
1253		 * callback routines to let them know that the log-commit
1254		 * didn't succeed.
1255		 */
1256		aborted = true;
1257	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
1258		aborted = true;
1259	}
1260
1261	xlog_state_done_syncing(iclog, aborted);
1262	bio_uninit(&iclog->ic_bio);
1263
1264	/*
1265	 * Drop the lock to signal that we are done. Nothing references the
1266	 * iclog after this, so an unmount waiting on this lock can now tear it
1267	 * down safely. As such, it is unsafe to reference the iclog after the
1268	 * unlock as we could race with it being freed.
1269	 */
1270	up(&iclog->ic_sema);
1271}
1272
1273/*
1274 * Return size of each in-core log record buffer.
1275 *
1276 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1277 *
1278 * If the filesystem blocksize is too large, we may need to choose a
1279 * larger size since the directory code currently logs entire blocks.
1280 */
1281STATIC void
1282xlog_get_iclog_buffer_size(
1283	struct xfs_mount	*mp,
1284	struct xlog		*log)
1285{
1286	if (mp->m_logbufs <= 0)
1287		mp->m_logbufs = XLOG_MAX_ICLOGS;
1288	if (mp->m_logbsize <= 0)
1289		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1290
1291	log->l_iclog_bufs = mp->m_logbufs;
1292	log->l_iclog_size = mp->m_logbsize;
1293
1294	/*
1295	 * # headers = size / 32k - one header holds cycles from 32k of data.
1296	 */
1297	log->l_iclog_heads =
1298		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1299	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1300}
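/*
 * For example, assuming the defaults set in xlog_get_iclog_buffer_size()
 * above: a 32 kB iclog needs one 512 byte header (l_iclog_heads = 1,
 * l_iclog_hsize = 512), while the largest 256 kB iclog needs
 * DIV_ROUND_UP(262144, 32768) = 8 headers, i.e. 4096 bytes of header space
 * per iclog.
 */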
1301
1302void
1303xfs_log_work_queue(
1304	struct xfs_mount        *mp)
1305{
1306	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1307				msecs_to_jiffies(xfs_syncd_centisecs * 10));
1308}
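/*
 * With the default xfs_syncd_centisecs value of 3000, this requeues the log
 * worker roughly every 30 seconds (3000 * 10 ms).
 */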
1309
1310/*
1311 * Every sync period we need to unpin all items in the AIL and push them to
1312 * disk. If there is nothing dirty, then we might need to cover the log to
1313 * indicate that the filesystem is idle.
1314 */
1315static void
1316xfs_log_worker(
1317	struct work_struct	*work)
1318{
1319	struct xlog		*log = container_of(to_delayed_work(work),
1320						struct xlog, l_work);
1321	struct xfs_mount	*mp = log->l_mp;
1322
1323	/* dgc: errors ignored - not fatal and nowhere to report them */
1324	if (xfs_log_need_covered(mp)) {
1325		/*
1326		 * Dump a transaction into the log that contains no real change.
1327		 * This is needed to stamp the current tail LSN into the log
1328		 * during the covering operation.
1329		 *
1330		 * We cannot use an inode here for this - that will push dirty
1331		 * state back up into the VFS and then periodic inode flushing
1332		 * will prevent log covering from making progress. Hence we
1333		 * synchronously log the superblock instead to ensure the
1334		 * superblock is immediately unpinned and can be written back.
1335		 */
1336		xfs_sync_sb(mp, true);
1337	} else
1338		xfs_log_force(mp, 0);
1339
1340	/* start pushing all the metadata that is currently dirty */
1341	xfs_ail_push_all(mp->m_ail);
1342
1343	/* queue us up again */
1344	xfs_log_work_queue(mp);
1345}
1346
1347/*
1348 * This routine initializes some of the log structure for a given mount point.
1349 * Its primary purpose is to fill in enough so that recovery can occur.
1350 * However, some other fields may be filled in too.
1351 */
1352STATIC struct xlog *
1353xlog_alloc_log(
1354	struct xfs_mount	*mp,
1355	struct xfs_buftarg	*log_target,
1356	xfs_daddr_t		blk_offset,
1357	int			num_bblks)
1358{
1359	struct xlog		*log;
1360	xlog_rec_header_t	*head;
1361	xlog_in_core_t		**iclogp;
1362	xlog_in_core_t		*iclog, *prev_iclog=NULL;
1363	int			i;
1364	int			error = -ENOMEM;
1365	uint			log2_size = 0;
1366
1367	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1368	if (!log) {
1369		xfs_warn(mp, "Log allocation failed: No memory!");
1370		goto out;
1371	}
1372
1373	log->l_mp	   = mp;
1374	log->l_targ	   = log_target;
1375	log->l_logsize     = BBTOB(num_bblks);
1376	log->l_logBBstart  = blk_offset;
1377	log->l_logBBsize   = num_bblks;
1378	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1379	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
1380	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1381
1382	log->l_prev_block  = -1;
1383	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1384	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1385	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1386	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1387
1388	xlog_grant_head_init(&log->l_reserve_head);
1389	xlog_grant_head_init(&log->l_write_head);
1390
1391	error = -EFSCORRUPTED;
1392	if (xfs_sb_version_hassector(&mp->m_sb)) {
1393	        log2_size = mp->m_sb.sb_logsectlog;
1394		if (log2_size < BBSHIFT) {
1395			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1396				log2_size, BBSHIFT);
1397			goto out_free_log;
1398		}
1399
1400	        log2_size -= BBSHIFT;
1401		if (log2_size > mp->m_sectbb_log) {
1402			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1403				log2_size, mp->m_sectbb_log);
1404			goto out_free_log;
1405		}
1406
1407		/* for larger sector sizes, must have v2 or external log */
1408		if (log2_size && log->l_logBBstart > 0 &&
1409			    !xfs_sb_version_haslogv2(&mp->m_sb)) {
1410			xfs_warn(mp,
1411		"log sector size (0x%x) invalid for configuration.",
1412				log2_size);
1413			goto out_free_log;
1414		}
1415	}
1416	log->l_sectBBsize = 1 << log2_size;
1417
1418	xlog_get_iclog_buffer_size(mp, log);
1419
1420	spin_lock_init(&log->l_icloglock);
1421	init_waitqueue_head(&log->l_flush_wait);
1422
1423	iclogp = &log->l_iclog;
1424	/*
1425	 * The amount of memory to allocate for the iclog structure is
1426	 * rather funky due to the way the structure is defined.  It is
1427	 * done this way so that we can use different sizes for machines
1428	 * with different amounts of memory.  See the definition of
1429	 * xlog_in_core_t in xfs_log_priv.h for details.
1430	 */
1431	ASSERT(log->l_iclog_size >= 4096);
1432	for (i = 0; i < log->l_iclog_bufs; i++) {
1433		int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
1434		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1435				sizeof(struct bio_vec);
1436
1437		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1438		if (!iclog)
1439			goto out_free_iclog;
1440
1441		*iclogp = iclog;
1442		iclog->ic_prev = prev_iclog;
1443		prev_iclog = iclog;
1444
1445		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
1446						KM_MAYFAIL | KM_ZERO);
1447		if (!iclog->ic_data)
1448			goto out_free_iclog;
1449#ifdef DEBUG
1450		log->l_iclog_bak[i] = &iclog->ic_header;
1451#endif
1452		head = &iclog->ic_header;
1453		memset(head, 0, sizeof(xlog_rec_header_t));
1454		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1455		head->h_version = cpu_to_be32(
1456			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1457		head->h_size = cpu_to_be32(log->l_iclog_size);
1458		/* new fields */
1459		head->h_fmt = cpu_to_be32(XLOG_FMT);
1460		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1461
1462		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1463		iclog->ic_state = XLOG_STATE_ACTIVE;
1464		iclog->ic_log = log;
1465		atomic_set(&iclog->ic_refcnt, 0);
1466		spin_lock_init(&iclog->ic_callback_lock);
1467		INIT_LIST_HEAD(&iclog->ic_callbacks);
1468		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1469
1470		init_waitqueue_head(&iclog->ic_force_wait);
1471		init_waitqueue_head(&iclog->ic_write_wait);
1472		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1473		sema_init(&iclog->ic_sema, 1);
1474
1475		iclogp = &iclog->ic_next;
1476	}
1477	*iclogp = log->l_iclog;			/* complete ring */
1478	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1479
1480	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1481			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
1482			mp->m_fsname);
1483	if (!log->l_ioend_workqueue)
1484		goto out_free_iclog;
1485
1486	error = xlog_cil_init(log);
1487	if (error)
1488		goto out_destroy_workqueue;
1489	return log;
1490
1491out_destroy_workqueue:
1492	destroy_workqueue(log->l_ioend_workqueue);
1493out_free_iclog:
1494	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1495		prev_iclog = iclog->ic_next;
1496		kmem_free(iclog->ic_data);
1497		kmem_free(iclog);
1498	}
1499out_free_log:
1500	kmem_free(log);
1501out:
1502	return ERR_PTR(error);
1503}	/* xlog_alloc_log */
1504
1505
1506/*
1507 * Write out the commit record of a transaction associated with the given
1508 * ticket.  Return the lsn of the commit record.
1509 */
1510STATIC int
1511xlog_commit_record(
1512	struct xlog		*log,
1513	struct xlog_ticket	*ticket,
1514	struct xlog_in_core	**iclog,
1515	xfs_lsn_t		*commitlsnp)
1516{
1517	struct xfs_mount *mp = log->l_mp;
1518	int	error;
1519	struct xfs_log_iovec reg = {
1520		.i_addr = NULL,
1521		.i_len = 0,
1522		.i_type = XLOG_REG_TYPE_COMMIT,
1523	};
1524	struct xfs_log_vec vec = {
1525		.lv_niovecs = 1,
1526		.lv_iovecp = &reg,
1527	};
1528
1529	ASSERT_ALWAYS(iclog);
1530	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1531					XLOG_COMMIT_TRANS);
1532	if (error)
1533		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1534	return error;
1535}
1536
1537/*
1538 * Push on the buffer cache code if we ever use more than 75% of the on-disk
1539 * log space.  This code pushes on the lsn which would supposedly free up
1540 * the 25% which we want to leave free.  We may need to adopt a policy which
1541 * pushes on an lsn which is further along in the log once we reach the high
1542 * water mark.  In this manner, we would be creating a low water mark.
1543 */
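/*
 * As an illustration with made-up numbers: on a 100000 block log where the
 * caller needs 2048 bytes, free_threshold below becomes
 * max(BTOBB(2048), 100000 >> 2, 256) = 25000 blocks, so AIL pushing starts
 * as soon as less than a quarter of the log is free.
 */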
1544STATIC void
1545xlog_grant_push_ail(
1546	struct xlog	*log,
1547	int		need_bytes)
1548{
1549	xfs_lsn_t	threshold_lsn = 0;
1550	xfs_lsn_t	last_sync_lsn;
1551	int		free_blocks;
1552	int		free_bytes;
1553	int		threshold_block;
1554	int		threshold_cycle;
1555	int		free_threshold;
1556
1557	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1558
1559	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1560	free_blocks = BTOBBT(free_bytes);
1561
1562	/*
1563	 * Set the threshold for the minimum number of free blocks in the
1564	 * log to the maximum of what the caller needs, one quarter of the
1565	 * log, and 256 blocks.
1566	 */
1567	free_threshold = BTOBB(need_bytes);
1568	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1569	free_threshold = max(free_threshold, 256);
1570	if (free_blocks >= free_threshold)
1571		return;
1572
1573	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1574						&threshold_block);
1575	threshold_block += free_threshold;
1576	if (threshold_block >= log->l_logBBsize) {
1577		threshold_block -= log->l_logBBsize;
1578		threshold_cycle += 1;
1579	}
1580	threshold_lsn = xlog_assign_lsn(threshold_cycle,
1581					threshold_block);
1582	/*
1583	 * Don't pass in an lsn greater than the lsn of the last
1584	 * log record known to be on disk. Use a snapshot of the last sync lsn
1585	 * so that it doesn't change between the compare and the set.
1586	 */
1587	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1588	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1589		threshold_lsn = last_sync_lsn;
1590
1591	/*
1592	 * Get the transaction layer to kick the dirty buffers out to
1593	 * disk asynchronously. No point in trying to do this if
1594	 * the filesystem is shutting down.
1595	 */
1596	if (!XLOG_FORCED_SHUTDOWN(log))
1597		xfs_ail_push(log->l_ailp, threshold_lsn);
1598}
1599
1600/*
1601 * Stamp cycle number in every block
1602 */
1603STATIC void
1604xlog_pack_data(
1605	struct xlog		*log,
1606	struct xlog_in_core	*iclog,
1607	int			roundoff)
1608{
1609	int			i, j, k;
1610	int			size = iclog->ic_offset + roundoff;
1611	__be32			cycle_lsn;
1612	char			*dp;
1613
1614	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1615
1616	dp = iclog->ic_datap;
1617	for (i = 0; i < BTOBB(size); i++) {
1618		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1619			break;
1620		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1621		*(__be32 *)dp = cycle_lsn;
1622		dp += BBSIZE;
1623	}
1624
1625	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1626		xlog_in_core_2_t *xhdr = iclog->ic_data;
1627
1628		for ( ; i < BTOBB(size); i++) {
1629			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1630			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1631			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1632			*(__be32 *)dp = cycle_lsn;
1633			dp += BBSIZE;
1634		}
1635
1636		for (i = 1; i < log->l_iclog_heads; i++)
1637			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1638	}
1639}
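/*
 * For example, if this iclog's header LSN is in cycle 7, xlog_pack_data()
 * above saves the first __be32 of every 512 byte block in the data area into
 * h_cycle_data[] (or an extended header for v2 logs) and replaces it with 7,
 * so recovery can tell exactly which blocks of the record made it to disk
 * intact.
 */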
1640
1641/*
1642 * Calculate the checksum for a log buffer.
1643 *
1644 * This is a little more complicated than it should be because the various
1645 * headers and the actual data are non-contiguous.
1646 */
1647__le32
1648xlog_cksum(
1649	struct xlog		*log,
1650	struct xlog_rec_header	*rhead,
1651	char			*dp,
1652	int			size)
1653{
1654	uint32_t		crc;
1655
1656	/* first generate the crc for the record header ... */
1657	crc = xfs_start_cksum_update((char *)rhead,
1658			      sizeof(struct xlog_rec_header),
1659			      offsetof(struct xlog_rec_header, h_crc));
1660
1661	/* ... then for additional cycle data for v2 logs ... */
1662	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1663		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1664		int		i;
1665		int		xheads;
1666
1667		xheads = size / XLOG_HEADER_CYCLE_SIZE;
1668		if (size % XLOG_HEADER_CYCLE_SIZE)
1669			xheads++;
1670
1671		for (i = 1; i < xheads; i++) {
1672			crc = crc32c(crc, &xhdr[i].hic_xheader,
1673				     sizeof(struct xlog_rec_ext_header));
1674		}
1675	}
1676
1677	/* ... and finally for the payload */
1678	crc = crc32c(crc, dp, size);
1679
1680	return xfs_end_cksum(crc);
1681}
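/*
 * As a sanity-check example: a 64 kB record spans two 32 kB header units, so
 * the v2 loop above folds exactly one extended header (i = 1) into the CRC
 * before checksumming the payload itself.
 */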
1682
1683static void
1684xlog_bio_end_io(
1685	struct bio		*bio)
1686{
1687	struct xlog_in_core	*iclog = bio->bi_private;
1688
1689	queue_work(iclog->ic_log->l_ioend_workqueue,
1690		   &iclog->ic_end_io_work);
1691}
1692
1693static void
1694xlog_map_iclog_data(
1695	struct bio		*bio,
1696	void			*data,
1697	size_t			count)
1698{
1699	do {
1700		struct page	*page = kmem_to_page(data);
1701		unsigned int	off = offset_in_page(data);
1702		size_t		len = min_t(size_t, count, PAGE_SIZE - off);
1703
1704		WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);
1705
1706		data += len;
1707		count -= len;
1708	} while (count);
1709}
1710
1711STATIC void
1712xlog_write_iclog(
1713	struct xlog		*log,
1714	struct xlog_in_core	*iclog,
1715	uint64_t		bno,
1716	unsigned int		count,
1717	bool			need_flush)
1718{
1719	ASSERT(bno < log->l_logBBsize);
1720
1721	/*
1722	 * We lock the iclogbufs here so that we can serialise against I/O
1723	 * completion during unmount.  We might be processing a shutdown
1724	 * triggered during unmount, and that can occur asynchronously to the
1725	 * unmount thread, and hence we need to ensure that completes before
1726	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
1727	 * across the log IO to achieve that.
1728	 */
1729	down(&iclog->ic_sema);
1730	if (unlikely(iclog->ic_state & XLOG_STATE_IOERROR)) {
1731		/*
1732		 * It would seem logical to return EIO here, but we rely on
1733		 * the log state machine to propagate I/O errors instead of
1734		 * doing it here.  We kick off the state machine and unlock
1735		 * the buffer manually; the code needs to be kept in sync
1736		 * with the I/O completion path.
1737		 */
1738		xlog_state_done_syncing(iclog, XFS_LI_ABORTED);
1739		up(&iclog->ic_sema);
1740		return;
1741	}
1742
1743	iclog->ic_io_size = count;
1744
1745	bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
1746	bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
1747	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1748	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1749	iclog->ic_bio.bi_private = iclog;
1750	iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_FUA;
1751	if (need_flush)
1752		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1753
1754	xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, iclog->ic_io_size);
1755	if (is_vmalloc_addr(iclog->ic_data))
1756		flush_kernel_vmap_range(iclog->ic_data, iclog->ic_io_size);
1757
1758	/*
1759	 * If this log buffer would straddle the end of the log we will have
1760	 * to split it up into two bios, so that we can continue at the start.
1761	 */
1762	if (bno + BTOBB(count) > log->l_logBBsize) {
1763		struct bio *split;
1764
1765		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1766				  GFP_NOIO, &fs_bio_set);
1767		bio_chain(split, &iclog->ic_bio);
1768		submit_bio(split);
1769
1770		/* restart at logical offset zero for the remainder */
1771		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1772	}
1773
1774	submit_bio(&iclog->ic_bio);
1775}
1776
1777/*
1778 * We need to bump cycle number for the part of the iclog that is
1779 * written to the start of the log. Watch out for the header magic
1780 * number case, though.
1781 */
1782static void
1783xlog_split_iclog(
1784	struct xlog		*log,
1785	void			*data,
1786	uint64_t		bno,
1787	unsigned int		count)
1788{
1789	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
1790	unsigned int		i;
1791
1792	for (i = split_offset; i < count; i += BBSIZE) {
1793		uint32_t cycle = get_unaligned_be32(data + i);
1794
1795		if (++cycle == XLOG_HEADER_MAGIC_NUM)
1796			cycle++;
1797		put_unaligned_be32(cycle, data + i);
1798	}
1799}
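/*
 * Example: if the wrapped blocks were stamped with cycle 6, the loop above
 * restamps them with 7; and if an increment would land exactly on
 * XLOG_HEADER_MAGIC_NUM, it is skipped so that a data block can never be
 * mistaken for a record header during recovery.
 */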
1800
1801static int
1802xlog_calc_iclog_size(
1803	struct xlog		*log,
1804	struct xlog_in_core	*iclog,
1805	uint32_t		*roundoff)
1806{
1807	uint32_t		count_init, count;
1808	bool			use_lsunit;
1809
1810	use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
1811			log->l_mp->m_sb.sb_logsunit > 1;
1812
1813	/* Add for LR header */
1814	count_init = log->l_iclog_hsize + iclog->ic_offset;
1815
1816	/* Round out the log write size */
1817	if (use_lsunit) {
1818		/* we have a v2 stripe unit to use */
1819		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1820	} else {
1821		count = BBTOB(BTOBB(count_init));
1822	}
1823
1824	ASSERT(count >= count_init);
1825	*roundoff = count - count_init;
1826
1827	if (use_lsunit)
1828		ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
1829	else
1830		ASSERT(*roundoff < BBTOB(1));
1831	return count;
1832}
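/*
 * A quick example with hypothetical sizes for xlog_calc_iclog_size() above:
 * given a 32 kB stripe unit and count_init = 33000 bytes, the write rounds
 * up to 65536 bytes with *roundoff = 32536; without a stripe unit the same
 * write rounds to the next 512 byte boundary, 33280 bytes, with
 * *roundoff = 280.
 */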
1833
1834/*
1835 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
1836 * fashion.  Previously, we should have moved the current iclog
1837 * ptr in the log to point to the next available iclog.  This allows further
1838 * writes to continue while this code syncs out an iclog ready to go.
1839 * Before an in-core log can be written out, the data section must be scanned
1840 * to save away the 1st word of each BBSIZE block into the header.  We replace
1841 * it with the current cycle count.  Each BBSIZE block is tagged with the
1842 * cycle count because there is an implicit assumption that drives will
1843 * guarantee that entire 512 byte blocks get written at once.  In other words,
1844 * we can't have part of a 512 byte block written and part not written.  By
1845 * tagging each block, we will know which blocks are valid when recovering
1846 * after an unclean shutdown.
1847 *
1848 * This routine is single threaded on the iclog.  No other thread can be in
1849 * this routine with the same iclog.  Changing contents of iclog can there-
1850 * fore be done without grabbing the state machine lock.  Updating the global
1851 * log will require grabbing the lock though.
1852 *
1853 * The entire log manager uses a logical block numbering scheme.  Only
1854 * xlog_write_iclog knows about the fact that the log may not start with
1855 * block zero on a given device.
1856 */
1857STATIC void
1858xlog_sync(
1859	struct xlog		*log,
1860	struct xlog_in_core	*iclog)
1861{
1862	unsigned int		count;		/* byte count of bwrite */
1863	unsigned int		roundoff;       /* roundoff to BB or stripe */
1864	uint64_t		bno;
1865	unsigned int		size;
1866	bool			need_flush = true, split = false;
1867
1868	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1869
1870	count = xlog_calc_iclog_size(log, iclog, &roundoff);
1871
1872	/* move grant heads by roundoff in sync */
1873	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1874	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1875
1876	/* put cycle number in every block */
1877	xlog_pack_data(log, iclog, roundoff); 
1878
1879	/* real byte length */
1880	size = iclog->ic_offset;
1881	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb))
1882		size += roundoff;
1883	iclog->ic_header.h_len = cpu_to_be32(size);
1884
1885	XFS_STATS_INC(log->l_mp, xs_log_writes);
1886	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
1887
1888	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
1889
1890	/* Do we need to split this write into 2 parts? */
1891	if (bno + BTOBB(count) > log->l_logBBsize) {
1892		xlog_split_iclog(log, &iclog->ic_header, bno, count);
1893		split = true;
1894	}
1895
1896	/* calculate the checksum */
1897	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1898					    iclog->ic_datap, size);
1899	/*
1900	 * Intentionally corrupt the log record CRC based on the error injection
1901	 * frequency, if defined. This facilitates testing log recovery in the
1902	 * event of torn writes. Hence, set the IOABORT state to abort the log
1903	 * write on I/O completion and shutdown the fs. The subsequent mount
1904	 * detects the bad CRC and attempts to recover.
1905	 */
1906#ifdef DEBUG
1907	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
1908		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
1909		iclog->ic_fail_crc = true;
1910		xfs_warn(log->l_mp,
1911	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
1912			 be64_to_cpu(iclog->ic_header.h_lsn));
1913	}
1914#endif
1915
1916	/*
1917	 * Flush the data device before flushing the log to make sure all meta
1918	 * data written back from the AIL actually made it to disk before
1919	 * stamping the new log tail LSN into the log buffer.  For an external
1920	 * log we need to issue the flush explicitly, and unfortunately
1921	 * synchronously here; for an internal log we can simply use the block
1922	 * layer state machine for preflushes.
1923	 */
1924	if (log->l_targ != log->l_mp->m_ddev_targp || split) {
1925		xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1926		need_flush = false;
1927	}
1928
1929	xlog_verify_iclog(log, iclog, count);
1930	xlog_write_iclog(log, iclog, bno, count, need_flush);
1931}
1932
1933/*
1934 * Deallocate a log structure
1935 */
1936STATIC void
1937xlog_dealloc_log(
1938	struct xlog	*log)
1939{
1940	xlog_in_core_t	*iclog, *next_iclog;
1941	int		i;
1942
1943	xlog_cil_destroy(log);
1944
1945	/*
1946	 * Cycle all the iclogbuf locks to make sure all log IO completion
1947	 * is done before we tear down these buffers.
1948	 */
1949	iclog = log->l_iclog;
1950	for (i = 0; i < log->l_iclog_bufs; i++) {
1951		down(&iclog->ic_sema);
1952		up(&iclog->ic_sema);
1953		iclog = iclog->ic_next;
1954	}
1955
1956	iclog = log->l_iclog;
1957	for (i = 0; i < log->l_iclog_bufs; i++) {
1958		next_iclog = iclog->ic_next;
1959		kmem_free(iclog->ic_data);
1960		kmem_free(iclog);
1961		iclog = next_iclog;
1962	}
1963
1964	log->l_mp->m_log = NULL;
1965	destroy_workqueue(log->l_ioend_workqueue);
1966	kmem_free(log);
1967}	/* xlog_dealloc_log */
1968
1969/*
1970 * Update counters atomically now that memcpy is done.
1971 */
1972/* ARGSUSED */
1973static inline void
1974xlog_state_finish_copy(
1975	struct xlog		*log,
1976	struct xlog_in_core	*iclog,
1977	int			record_cnt,
1978	int			copy_bytes)
1979{
1980	spin_lock(&log->l_icloglock);
1981
1982	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1983	iclog->ic_offset += copy_bytes;
1984
1985	spin_unlock(&log->l_icloglock);
1986}	/* xlog_state_finish_copy */
1987
1988
1989
1990
1991/*
1992 * print out info relating to regions written which consume
1993 * the reservation
1994 */
1995void
1996xlog_print_tic_res(
1997	struct xfs_mount	*mp,
1998	struct xlog_ticket	*ticket)
1999{
2000	uint i;
2001	uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
2002
2003	/* match with XLOG_REG_TYPE_* in xfs_log.h */
2004#define REG_TYPE_STR(type, str)	[XLOG_REG_TYPE_##type] = str
2005	static char *res_type_str[] = {
2006	    REG_TYPE_STR(BFORMAT, "bformat"),
2007	    REG_TYPE_STR(BCHUNK, "bchunk"),
2008	    REG_TYPE_STR(EFI_FORMAT, "efi_format"),
2009	    REG_TYPE_STR(EFD_FORMAT, "efd_format"),
2010	    REG_TYPE_STR(IFORMAT, "iformat"),
2011	    REG_TYPE_STR(ICORE, "icore"),
2012	    REG_TYPE_STR(IEXT, "iext"),
2013	    REG_TYPE_STR(IBROOT, "ibroot"),
2014	    REG_TYPE_STR(ILOCAL, "ilocal"),
2015	    REG_TYPE_STR(IATTR_EXT, "iattr_ext"),
2016	    REG_TYPE_STR(IATTR_BROOT, "iattr_broot"),
2017	    REG_TYPE_STR(IATTR_LOCAL, "iattr_local"),
2018	    REG_TYPE_STR(QFORMAT, "qformat"),
2019	    REG_TYPE_STR(DQUOT, "dquot"),
2020	    REG_TYPE_STR(QUOTAOFF, "quotaoff"),
2021	    REG_TYPE_STR(LRHEADER, "LR header"),
2022	    REG_TYPE_STR(UNMOUNT, "unmount"),
2023	    REG_TYPE_STR(COMMIT, "commit"),
2024	    REG_TYPE_STR(TRANSHDR, "trans header"),
2025	    REG_TYPE_STR(ICREATE, "inode create"),
2026	    REG_TYPE_STR(RUI_FORMAT, "rui_format"),
2027	    REG_TYPE_STR(RUD_FORMAT, "rud_format"),
2028	    REG_TYPE_STR(CUI_FORMAT, "cui_format"),
2029	    REG_TYPE_STR(CUD_FORMAT, "cud_format"),
2030	    REG_TYPE_STR(BUI_FORMAT, "bui_format"),
2031	    REG_TYPE_STR(BUD_FORMAT, "bud_format"),
2032	};
2033	BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1);
2034#undef REG_TYPE_STR
2035
2036	xfs_warn(mp, "ticket reservation summary:");
2037	xfs_warn(mp, "  unit res    = %d bytes",
2038		 ticket->t_unit_res);
2039	xfs_warn(mp, "  current res = %d bytes",
2040		 ticket->t_curr_res);
2041	xfs_warn(mp, "  total reg   = %u bytes (o/flow = %u bytes)",
2042		 ticket->t_res_arr_sum, ticket->t_res_o_flow);
2043	xfs_warn(mp, "  ophdrs      = %u (ophdr space = %u bytes)",
2044		 ticket->t_res_num_ophdrs, ophdr_spc);
2045	xfs_warn(mp, "  ophdr + reg = %u bytes",
2046		 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc);
2047	xfs_warn(mp, "  num regions = %u",
2048		 ticket->t_res_num);
2049
2050	for (i = 0; i < ticket->t_res_num; i++) {
2051		uint r_type = ticket->t_res_arr[i].r_type;
2052		xfs_warn(mp, "region[%u]: %s - %u bytes", i,
2053			    ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
2054			    "bad-rtype" : res_type_str[r_type]),
2055			    ticket->t_res_arr[i].r_len);
2056	}
2057}
2058
2059/*
2060 * Print a summary of the transaction.
2061 */
2062void
2063xlog_print_trans(
2064	struct xfs_trans	*tp)
2065{
2066	struct xfs_mount	*mp = tp->t_mountp;
2067	struct xfs_log_item	*lip;
2068
2069	/* dump core transaction and ticket info */
2070	xfs_warn(mp, "transaction summary:");
2071	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
2072	xfs_warn(mp, "  log count = %d", tp->t_log_count);
2073	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
2074
2075	xlog_print_tic_res(mp, tp->t_ticket);
2076
2077	/* dump each log item */
2078	list_for_each_entry(lip, &tp->t_items, li_trans) {
2079		struct xfs_log_vec	*lv = lip->li_lv;
2080		struct xfs_log_iovec	*vec;
2081		int			i;
2082
2083		xfs_warn(mp, "log item: ");
2084		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
2085		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
2086		if (!lv)
2087			continue;
2088		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
2089		xfs_warn(mp, "  size	= %d", lv->lv_size);
2090		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
2091		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
2092
2093		/* dump each iovec for the log item */
2094		vec = lv->lv_iovecp;
2095		for (i = 0; i < lv->lv_niovecs; i++) {
2096			int dumplen = min(vec->i_len, 32);
2097
2098			xfs_warn(mp, "  iovec[%d]", i);
2099			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
2100			xfs_warn(mp, "    len	= %d", vec->i_len);
2101			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
2102			xfs_hex_dump(vec->i_addr, dumplen);
2103
2104			vec++;
2105		}
2106	}
2107}
2108
2109/*
2110 * Calculate the potential space needed by the log vector.  Each region gets
2111 * its own xlog_op_header_t and may need to be double word aligned.
2112 */
2113static int
2114xlog_write_calc_vec_length(
2115	struct xlog_ticket	*ticket,
2116	struct xfs_log_vec	*log_vector)
2117{
2118	struct xfs_log_vec	*lv;
2119	int			headers = 0;
2120	int			len = 0;
2121	int			i;
2122
2123	/* acct for start rec of xact */
2124	if (ticket->t_flags & XLOG_TIC_INITED)
2125		headers++;
2126
2127	for (lv = log_vector; lv; lv = lv->lv_next) {
2128		/* we don't write ordered log vectors */
2129		if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2130			continue;
2131
2132		headers += lv->lv_niovecs;
2133
2134		for (i = 0; i < lv->lv_niovecs; i++) {
2135			struct xfs_log_iovec	*vecp = &lv->lv_iovecp[i];
2136
2137			len += vecp->i_len;
2138			xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2139		}
2140	}
2141
2142	ticket->t_res_num_ophdrs += headers;
2143	len += headers * sizeof(struct xlog_op_header);
2144
2145	return len;
2146}
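/*
 * Worked example with made-up sizes for xlog_write_calc_vec_length() above:
 * an inited ticket writing one log vector with two regions of 64 and 128
 * bytes needs three op headers (the start record plus one per region), so
 * the length is 64 + 128 + 3 * sizeof(struct xlog_op_header) bytes.
 */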
2147
2148/*
2149 * If first write for transaction, insert start record.  We can't be trying to
2150 * commit if we are inited.  We can't have any "partial_copy" if we are inited.
2151 */
2152static int
2153xlog_write_start_rec(
2154	struct xlog_op_header	*ophdr,
2155	struct xlog_ticket	*ticket)
2156{
2157	if (!(ticket->t_flags & XLOG_TIC_INITED))
2158		return 0;
2159
2160	ophdr->oh_tid	= cpu_to_be32(ticket->t_tid);
2161	ophdr->oh_clientid = ticket->t_clientid;
2162	ophdr->oh_len = 0;
2163	ophdr->oh_flags = XLOG_START_TRANS;
2164	ophdr->oh_res2 = 0;
2165
2166	ticket->t_flags &= ~XLOG_TIC_INITED;
2167
2168	return sizeof(struct xlog_op_header);
2169}
2170
2171static xlog_op_header_t *
2172xlog_write_setup_ophdr(
2173	struct xlog		*log,
2174	struct xlog_op_header	*ophdr,
2175	struct xlog_ticket	*ticket,
2176	uint			flags)
2177{
2178	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2179	ophdr->oh_clientid = ticket->t_clientid;
2180	ophdr->oh_res2 = 0;
2181
2182	/* are we copying a commit or unmount record? */
2183	ophdr->oh_flags = flags;
2184
2185	/*
2186	 * We've seen logs corrupted with bad transaction client ids.  This
2187	 * makes sure that XFS doesn't generate new ones.  Turn this into an EIO
2188	 * and shut down the filesystem.
2189	 */
2190	switch (ophdr->oh_clientid)  {
2191	case XFS_TRANSACTION:
2192	case XFS_VOLUME:
2193	case XFS_LOG:
2194		break;
2195	default:
2196		xfs_warn(log->l_mp,
2197			"Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
2198			ophdr->oh_clientid, ticket);
2199		return NULL;
2200	}
2201
2202	return ophdr;
2203}
2204
2205/*
2206 * Set up the parameters of the region copy into the log. This has
2207 * to handle region write split across multiple log buffers - this
2208 * state is kept external to this function so that this code can
2209 * be written in an obvious, self documenting manner.
2210 */
2211static int
2212xlog_write_setup_copy(
2213	struct xlog_ticket	*ticket,
2214	struct xlog_op_header	*ophdr,
2215	int			space_available,
2216	int			space_required,
2217	int			*copy_off,
2218	int			*copy_len,
2219	int			*last_was_partial_copy,
2220	int			*bytes_consumed)
2221{
2222	int			still_to_copy;
2223
2224	still_to_copy = space_required - *bytes_consumed;
2225	*copy_off = *bytes_consumed;
2226
2227	if (still_to_copy <= space_available) {
2228		/* write of region completes here */
2229		*copy_len = still_to_copy;
2230		ophdr->oh_len = cpu_to_be32(*copy_len);
2231		if (*last_was_partial_copy)
2232			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2233		*last_was_partial_copy = 0;
2234		*bytes_consumed = 0;
2235		return 0;
2236	}
2237
2238	/* partial write of region, needs extra log op header reservation */
2239	*copy_len = space_available;
2240	ophdr->oh_len = cpu_to_be32(*copy_len);
2241	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2242	if (*last_was_partial_copy)
2243		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2244	*bytes_consumed += *copy_len;
2245	(*last_was_partial_copy)++;
2246
2247	/* account for new log op header */
2248	ticket->t_curr_res -= sizeof(struct xlog_op_header);
2249	ticket->t_res_num_ophdrs++;
2250
2251	return sizeof(struct xlog_op_header);
2252}
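/*
 * Example of the continuation path in xlog_write_setup_copy() above, with
 * invented numbers: a 10000 byte region offered only 4096 bytes of iclog
 * space is written as a 4096 byte XLOG_CONTINUE_TRANS chunk; the next call
 * sees *bytes_consumed = 4096 and still_to_copy = 5904, and (space
 * permitting) finishes the region with XLOG_WAS_CONT_TRANS and
 * XLOG_END_TRANS set on the second op header.
 */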
2253
2254static int
2255xlog_write_copy_finish(
2256	struct xlog		*log,
2257	struct xlog_in_core	*iclog,
2258	uint			flags,
2259	int			*record_cnt,
2260	int			*data_cnt,
2261	int			*partial_copy,
2262	int			*partial_copy_len,
2263	int			log_offset,
2264	struct xlog_in_core	**commit_iclog)
2265{
2266	if (*partial_copy) {
2267		/*
2268		 * This iclog has already been marked WANT_SYNC by
2269		 * xlog_state_get_iclog_space.
2270		 */
2271		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2272		*record_cnt = 0;
2273		*data_cnt = 0;
2274		return xlog_state_release_iclog(log, iclog);
2275	}
2276
2277	*partial_copy = 0;
2278	*partial_copy_len = 0;
2279
2280	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2281		/* no more space in this iclog - push it. */
2282		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2283		*record_cnt = 0;
2284		*data_cnt = 0;
2285
2286		spin_lock(&log->l_icloglock);
2287		xlog_state_want_sync(log, iclog);
2288		spin_unlock(&log->l_icloglock);
2289
2290		if (!commit_iclog)
2291			return xlog_state_release_iclog(log, iclog);
2292		ASSERT(flags & XLOG_COMMIT_TRANS);
2293		*commit_iclog = iclog;
2294	}
2295
 
 
 
 
 
2296	return 0;
2297}
2298
2299/*
2300 * Write some region out to in-core log
2301 *
2302 * This will be called when writing externally provided regions or when
2303 * writing out a commit record for a given transaction.
2304 *
2305 * General algorithm:
2306 *	1. Find total length of this write.  This may include adding to the
2307 *		lengths passed in.
2308 *	2. Check whether we violate the ticket's reservation.
2309 *	3. While writing to this iclog
2310 *	    A. Reserve as much space in this iclog as we can get
2311 *	    B. If this is first write, save away start lsn
2312 *	    C. While writing this region:
2313 *		1. If first write of transaction, write start record
2314 *		2. Write log operation header (header per region)
2315 *		3. Find out if we can fit entire region into this iclog
2316 *		4. Potentially, verify destination memcpy ptr
2317 *		5. Memcpy (partial) region
2318 *		6. If partial copy, release iclog; otherwise, continue
2319 *			copying more regions into current iclog
2320 *	4. Mark want sync bit (in simulation mode)
2321 *	5. Release iclog for potential flush to on-disk log.
2322 *
2323 * ERRORS:
2324 * 1.	Panic if reservation is overrun.  This should never happen since
2325 *	reservation amounts are generated internal to the filesystem.
2326 * NOTES:
2327 * 1. Tickets are single threaded data structures.
2328 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2329 *	syncing routine.  When a single log_write region needs to span
2330 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2331 *	on all log operation writes which don't contain the end of the
2332 *	region.  The XLOG_END_TRANS bit is used for the in-core log
2333 *	operation which contains the end of the continued log_write region.
2334 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2335 *	we don't really know exactly how much space will be used.  As a result,
2336 *	we don't update ic_offset until the end when we know exactly how many
2337 *	bytes have been written out.
2338 */
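/*
 * As a concrete illustration, a commit record arrives here as a single
 * zero-length XLOG_REG_TYPE_COMMIT region, so the only bytes copied into the
 * iclog are one xlog_op_header tagged XLOG_COMMIT_TRANS.
 */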
2339int
2340xlog_write(
2341	struct xlog		*log,
2342	struct xfs_log_vec	*log_vector,
2343	struct xlog_ticket	*ticket,
2344	xfs_lsn_t		*start_lsn,
2345	struct xlog_in_core	**commit_iclog,
2346	uint			flags)
2347{
2348	struct xlog_in_core	*iclog = NULL;
2349	struct xfs_log_iovec	*vecp;
2350	struct xfs_log_vec	*lv;
2351	int			len;
2352	int			index;
2353	int			partial_copy = 0;
2354	int			partial_copy_len = 0;
2355	int			contwr = 0;
2356	int			record_cnt = 0;
2357	int			data_cnt = 0;
2358	int			error;
2359
2360	*start_lsn = 0;
2361
2362	len = xlog_write_calc_vec_length(ticket, log_vector);
2363
2364	/*
2365	 * Region headers and bytes are already accounted for.
2366	 * We only need to take into account start records and
2367	 * split regions in this function.
2368	 */
2369	if (ticket->t_flags & XLOG_TIC_INITED)
2370		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2371
2372	/*
2373	 * Commit record headers need to be accounted for. These
2374	 * come in as separate writes so are easy to detect.
2375	 */
2376	if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2377		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2378
2379	if (ticket->t_curr_res < 0) {
2380		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2381		     "ctx ticket reservation ran out. Need to up reservation");
2382		xlog_print_tic_res(log->l_mp, ticket);
2383		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
2384	}
2385
2386	index = 0;
2387	lv = log_vector;
2388	vecp = lv->lv_iovecp;
2389	while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2390		void		*ptr;
2391		int		log_offset;
2392
2393		error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2394						   &contwr, &log_offset);
2395		if (error)
2396			return error;
2397
2398		ASSERT(log_offset <= iclog->ic_size - 1);
2399		ptr = iclog->ic_datap + log_offset;
2400
2401		/* start_lsn is the first lsn written to. That's all we need. */
2402		if (!*start_lsn)
2403			*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2404
2405		/*
2406		 * This loop writes out as many regions as can fit in the amount
2407		 * of space which was allocated by xlog_state_get_iclog_space().
2408		 */
2409		while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2410			struct xfs_log_iovec	*reg;
2411			struct xlog_op_header	*ophdr;
2412			int			start_rec_copy;
2413			int			copy_len;
2414			int			copy_off;
2415			bool			ordered = false;
2416
2417			/* ordered log vectors have no regions to write */
2418			if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2419				ASSERT(lv->lv_niovecs == 0);
2420				ordered = true;
2421				goto next_lv;
2422			}
2423
2424			reg = &vecp[index];
2425			ASSERT(reg->i_len % sizeof(int32_t) == 0);
2426			ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
2427
2428			start_rec_copy = xlog_write_start_rec(ptr, ticket);
2429			if (start_rec_copy) {
2430				record_cnt++;
2431				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2432						   start_rec_copy);
2433			}
2434
2435			ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2436			if (!ophdr)
2437				return -EIO;
2438
2439			xlog_write_adv_cnt(&ptr, &len, &log_offset,
2440					   sizeof(struct xlog_op_header));
2441
2442			len += xlog_write_setup_copy(ticket, ophdr,
2443						     iclog->ic_size-log_offset,
2444						     reg->i_len,
2445						     &copy_off, &copy_len,
2446						     &partial_copy,
2447						     &partial_copy_len);
2448			xlog_verify_dest_ptr(log, ptr);
2449
2450			/*
2451			 * Copy region.
2452			 *
2453			 * Unmount records just log an opheader, so can have
2454			 * empty payloads with no data region to copy. Hence we
2455			 * only copy the payload if the vector says it has data
2456			 * to copy.
2457			 */
2458			ASSERT(copy_len >= 0);
2459			if (copy_len > 0) {
2460				memcpy(ptr, reg->i_addr + copy_off, copy_len);
2461				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2462						   copy_len);
2463			}
2464			copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2465			record_cnt++;
2466			data_cnt += contwr ? copy_len : 0;
2467
2468			error = xlog_write_copy_finish(log, iclog, flags,
2469						       &record_cnt, &data_cnt,
2470						       &partial_copy,
2471						       &partial_copy_len,
2472						       log_offset,
2473						       commit_iclog);
2474			if (error)
2475				return error;
2476
2477			/*
2478			 * if we had a partial copy, we need to get more iclog
2479			 * space but we don't want to increment the region
2480			 * index because there is still more in this region to
2481			 * write.
2482			 *
2483			 * If we completed writing this region, and we flushed
2484			 * the iclog (indicated by resetting of the record
2485			 * count), then we also need to get more log space. If
2486			 * this was the last record, though, we are done and
2487			 * can just return.
2488			 */
2489			if (partial_copy)
2490				break;
2491
2492			if (++index == lv->lv_niovecs) {
2493next_lv:
2494				lv = lv->lv_next;
2495				index = 0;
2496				if (lv)
2497					vecp = lv->lv_iovecp;
2498			}
2499			if (record_cnt == 0 && !ordered) {
2500				if (!lv)
2501					return 0;
2502				break;
2503			}
2504		}
2505	}
2506
2507	ASSERT(len == 0);
2508
2509	xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2510	if (!commit_iclog)
2511		return xlog_state_release_iclog(log, iclog);
2512
2513	ASSERT(flags & XLOG_COMMIT_TRANS);
2514	*commit_iclog = iclog;
2515	return 0;
2516}
2517
2518
2519/*****************************************************************************
2520 *
2521 *		State Machine functions
2522 *
2523 *****************************************************************************
2524 */
2525
2526/*
2527 * An iclog has just finished IO completion processing, so we need to update
2528 * the iclog state and propagate that up into the overall log state. Hence we
2529 * prepare the iclog for cleaning, and then clean all the pending dirty iclogs
2530 * starting from the head, and then wake up any threads that are waiting for the
2531 * iclog to be marked clean.
2532 *
2533 * The ordering of marking iclogs ACTIVE must be maintained, so an iclog
2534 * doesn't become ACTIVE beyond one that is SYNCING.  This is also required to
2535 * maintain the notion that we use an ordered wait queue to hold off would-be
2536 * writers to the log when every iclog is trying to sync to disk.
2537 *
2538 * Caller must hold the icloglock before calling us.
2539 *
2540 * State Change: !IOERROR -> DIRTY -> ACTIVE
2541 */
2542STATIC void
2543xlog_state_clean_iclog(
2544	struct xlog		*log,
2545	struct xlog_in_core	*dirty_iclog)
2546{
2547	struct xlog_in_core	*iclog;
2548	int			changed = 0;
2549
2550	/* Prepare the completed iclog. */
2551	if (!(dirty_iclog->ic_state & XLOG_STATE_IOERROR))
2552		dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2553
2554	/* Walk all the iclogs to update the ordered active state. */
2555	iclog = log->l_iclog;
2556	do {
2557		if (iclog->ic_state == XLOG_STATE_DIRTY) {
2558			iclog->ic_state	= XLOG_STATE_ACTIVE;
2559			iclog->ic_offset       = 0;
2560			ASSERT(list_empty_careful(&iclog->ic_callbacks));
2561			/*
2562			 * If the number of ops in this iclog indicates it just
2563			 * contains the dummy transaction, we can
2564			 * change state into IDLE (the second time around).
2565			 * Otherwise we should change the state into
2566			 * NEED a dummy.
2567			 * We don't need to cover the dummy.
2568			 */
2569			if (!changed &&
2570			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2571			   		XLOG_COVER_OPS)) {
2572				changed = 1;
2573			} else {
2574				/*
2575				 * We have two dirty iclogs so start over.
2576				 * This could also mean the number of ops
2577				 * indicates this is not the dummy going out.
2578				 */
2579				changed = 2;
2580			}
2581			iclog->ic_header.h_num_logops = 0;
2582			memset(iclog->ic_header.h_cycle_data, 0,
2583			      sizeof(iclog->ic_header.h_cycle_data));
2584			iclog->ic_header.h_lsn = 0;
2585		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2586			/* do nothing */;
2587		else
2588			break;	/* stop cleaning */
2589		iclog = iclog->ic_next;
2590	} while (iclog != log->l_iclog);
2591
2592
2593	/*
2594	 * Wake up threads waiting in xfs_log_force() for the dirty iclog
2595	 * to be cleaned.
2596	 */
2597	wake_up_all(&dirty_iclog->ic_force_wait);
2598
2599	/*
2600	 * Change state for the dummy log recording.
2601	 * We usually go to NEED. But we go to NEED2 if the changed indicates
2602	 * we are done writing the dummy record.
2603 * If we are done with the second dummy record (DONE2), then
2604	 * we go to IDLE.
2605	 */
2606	if (changed) {
2607		switch (log->l_covered_state) {
2608		case XLOG_STATE_COVER_IDLE:
2609		case XLOG_STATE_COVER_NEED:
2610		case XLOG_STATE_COVER_NEED2:
2611			log->l_covered_state = XLOG_STATE_COVER_NEED;
2612			break;
2613
2614		case XLOG_STATE_COVER_DONE:
2615			if (changed == 1)
2616				log->l_covered_state = XLOG_STATE_COVER_NEED2;
2617			else
2618				log->l_covered_state = XLOG_STATE_COVER_NEED;
2619			break;
2620
2621		case XLOG_STATE_COVER_DONE2:
2622			if (changed == 1)
2623				log->l_covered_state = XLOG_STATE_COVER_IDLE;
2624			else
2625				log->l_covered_state = XLOG_STATE_COVER_NEED;
2626			break;
2627
2628		default:
2629			ASSERT(0);
2630		}
2631	}
2632}
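/*
 * For example: if the iclog that just went dirty contained only the
 * XLOG_COVER_OPS dummy ops (changed == 1) while the switch above was in
 * COVER_DONE, we advance to COVER_NEED2; a second dummy record then moves
 * COVER_DONE2 to COVER_IDLE and the log is fully covered.
 */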
2633
2634STATIC xfs_lsn_t
2635xlog_get_lowest_lsn(
2636	struct xlog		*log)
2637{
2638	struct xlog_in_core	*iclog = log->l_iclog;
2639	xfs_lsn_t		lowest_lsn = 0, lsn;
2640
2641	do {
2642		if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))
2643			continue;
2644
2645		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2646		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2647			lowest_lsn = lsn;
2648	} while ((iclog = iclog->ic_next) != log->l_iclog);
2649
2650	return lowest_lsn;
2651}
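/*
 * E.g. with iclogs at LSNs (cycle 4, block 100) and (4, 160) still in flight
 * and a third iclog ACTIVE, xlog_get_lowest_lsn() above returns (4, 100):
 * callbacks for the later iclog must wait until the earliest one completes.
 */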
2652
2653/*
2654 * Completion of an iclog IO does not imply that a transaction has completed, as
2655 * transactions can be large enough to span many iclogs. We cannot change the
2656 * tail of the log half way through a transaction as this may be the only
2657 * transaction in the log and moving the tail to point to the middle of it
2658 * will prevent recovery from finding the start of the transaction. Hence we
2659 * should only update the last_sync_lsn if this iclog contains transaction
2660 * completion callbacks on it.
2661 *
2662 * We have to do this before we drop the icloglock to ensure we are the only one
2663 * that can update it.
2664 *
2665 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2666 * the reservation grant head pushing. This is due to the fact that the push
2667 * target is bound by the current last_sync_lsn value. Hence if we have a large
2668 * amount of log space bound up in this committing transaction then the
2669 * last_sync_lsn value may be the limiting factor preventing tail pushing from
2670 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2671 * should push the AIL to ensure the push target (and hence the grant head) is
2672 * no longer bound by the old log head location and can move forwards and make
2673 * progress again.
2674 */
2675static void
2676xlog_state_set_callback(
2677	struct xlog		*log,
2678	struct xlog_in_core	*iclog,
2679	xfs_lsn_t		header_lsn)
2680{
2681	iclog->ic_state = XLOG_STATE_CALLBACK;
2682
2683	ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2684			   header_lsn) <= 0);
2685
2686	if (list_empty_careful(&iclog->ic_callbacks))
2687		return;
2688
2689	atomic64_set(&log->l_last_sync_lsn, header_lsn);
2690	xlog_grant_push_ail(log, 0);
2691}
2692
2693/*
2694 * Return true if we need to stop processing, false to continue to the next
2695 * iclog. The caller will need to run callbacks if the iclog is returned in the
2696 * XLOG_STATE_CALLBACK state.
2697 */
2698static bool
2699xlog_state_iodone_process_iclog(
2700	struct xlog		*log,
2701	struct xlog_in_core	*iclog,
2702	struct xlog_in_core	*completed_iclog,
2703	bool			*ioerror)
2704{
2705	xfs_lsn_t		lowest_lsn;
2706	xfs_lsn_t		header_lsn;
2707
2708	/* Skip all iclogs in the ACTIVE & DIRTY states */
2709	if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))
2710		return false;
2711
2712	/*
2713	 * Between marking a filesystem SHUTDOWN and stopping the log, we do
2714	 * flush all iclogs to disk (if there wasn't a log I/O error). So, we do
2715	 * want things to go smoothly in case of just a SHUTDOWN without a
2716	 * LOG_IO_ERROR.
2717	 */
2718	if (iclog->ic_state & XLOG_STATE_IOERROR) {
2719		*ioerror = true;
2720		return false;
2721	}
2722
2723	/*
2724	 * Can only perform callbacks in order.  Since this iclog is not in the
2725	 * DONE_SYNC/DO_CALLBACK state, we skip the rest and just try to clean
2726	 * up.  If we set our iclog to DO_CALLBACK, we will not process it when
2727	 * we retry since a previous iclog is in the CALLBACK and the state
2728	 * cannot change since we are holding the l_icloglock.
2729	 */
2730	if (!(iclog->ic_state &
2731			(XLOG_STATE_DONE_SYNC | XLOG_STATE_DO_CALLBACK))) {
2732		if (completed_iclog &&
2733		    (completed_iclog->ic_state == XLOG_STATE_DONE_SYNC)) {
2734			completed_iclog->ic_state = XLOG_STATE_DO_CALLBACK;
2735		}
2736		return true;
2737	}
2738
2739	/*
2740	 * We now have an iclog that is in either the DO_CALLBACK or DONE_SYNC
2741	 * states. The other states (WANT_SYNC, SYNCING, or CALLBACK) were caught
2742	 * by the above if and are going to be cleaned (i.e. we aren't doing
2743	 * their callbacks).
2744	 *
2745	 * We will do one more check here to see if we have chased our tail
2746	 * around. If this is not the lowest lsn iclog, then we will leave it
2747	 * for another completion to process.
2748	 */
2749	header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2750	lowest_lsn = xlog_get_lowest_lsn(log);
2751	if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2752		return false;
2753
2754	xlog_state_set_callback(log, iclog, header_lsn);
2755	return false;
2756
2757}
2758
2759/*
2760 * Keep processing entries in the iclog callback list until we come around and
2761 * it is empty.  We need to atomically see that the list is empty and change the
2762 * state to DIRTY so that we don't miss any more callbacks being added.
2763 *
2764 * This function is called with the icloglock held and returns with it held. We
2765 * drop it while running callbacks, however, as holding it over thousands of
2766 * callbacks is unnecessary and causes excessive contention if we do.
2767 */
2768static void
2769xlog_state_do_iclog_callbacks(
2770	struct xlog		*log,
2771	struct xlog_in_core	*iclog,
2772	bool			aborted)
2773{
2774	spin_unlock(&log->l_icloglock);
2775	spin_lock(&iclog->ic_callback_lock);
2776	while (!list_empty(&iclog->ic_callbacks)) {
2777		LIST_HEAD(tmp);
2778
2779		list_splice_init(&iclog->ic_callbacks, &tmp);
2780
2781		spin_unlock(&iclog->ic_callback_lock);
2782		xlog_cil_process_committed(&tmp, aborted);
2783		spin_lock(&iclog->ic_callback_lock);
2784	}
2785
2786	/*
2787	 * Pick up the icloglock while still holding the callback lock so we
2788	 * serialise against anyone trying to add more callbacks to this iclog
2789	 * now we've finished processing.
2790	 */
2791	spin_lock(&log->l_icloglock);
2792	spin_unlock(&iclog->ic_callback_lock);
2793}
2794
2795#ifdef DEBUG
2796/*
2797 * Make one last gasp attempt to see if iclogs are being left in limbo.  If the
2798 * above loop finds an iclog earlier than the current iclog and in one of the
2799 * syncing states, the current iclog is put into DO_CALLBACK and the callbacks
2800 * are deferred to the completion of the earlier iclog. Walk the iclogs in order
2801 * and make sure that no iclog is in DO_CALLBACK unless an earlier iclog is in
2802 * one of the syncing states.
2803 *
2804 * Note that SYNCING|IOERROR is a valid state so we cannot just check for
2805 * ic_state == SYNCING.
2806 */
2807static void
2808xlog_state_callback_check_state(
2809	struct xlog		*log)
2810{
2811	struct xlog_in_core	*first_iclog = log->l_iclog;
2812	struct xlog_in_core	*iclog = first_iclog;
2813
2814	do {
2815		ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2816		/*
2817		 * Terminate the loop if iclogs are found in states
2818		 * which will cause other threads to clean up iclogs.
2819		 *
2820		 * SYNCING - i/o completion will go through logs
2821		 * DONE_SYNC - interrupt thread should be waiting for
2822		 *              l_icloglock
2823		 * IOERROR - give up hope all ye who enter here
2824		 */
2825		if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2826		    iclog->ic_state & XLOG_STATE_SYNCING ||
2827		    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2828		    iclog->ic_state == XLOG_STATE_IOERROR)
2829			break;
2830		iclog = iclog->ic_next;
2831	} while (first_iclog != iclog);
2832}
2833#else
2834#define xlog_state_callback_check_state(l)	((void)0)
2835#endif
2836
2837STATIC void
2838xlog_state_do_callback(
2839	struct xlog		*log,
2840	bool			aborted,
2841	struct xlog_in_core	*ciclog)
2842{
2843	struct xlog_in_core	*iclog;
2844	struct xlog_in_core	*first_iclog;
2845	bool			did_callbacks = false;
2846	bool			cycled_icloglock;
2847	bool			ioerror;
2848	int			flushcnt = 0;
2849	int			repeats = 0;
2850
2851	spin_lock(&log->l_icloglock);
2852	do {
2853		/*
2854		 * Scan all iclogs starting with the one pointed to by the
2855		 * log.  Reset this starting point each time the log is
2856		 * unlocked (during callbacks).
2857		 *
2858		 * Keep looping through iclogs until one full pass is made
2859		 * without running any callbacks.
2860		 */
2861		first_iclog = log->l_iclog;
2862		iclog = log->l_iclog;
2863		cycled_icloglock = false;
2864		ioerror = false;
2865		repeats++;
2866
2867		do {
2868			if (xlog_state_iodone_process_iclog(log, iclog,
2869							ciclog, &ioerror))
2870				break;
2871
2872			if (!(iclog->ic_state &
2873			      (XLOG_STATE_CALLBACK | XLOG_STATE_IOERROR))) {
2874				iclog = iclog->ic_next;
2875				continue;
2876			}
2877
2878			/*
2879			 * Running callbacks will drop the icloglock which means
2880			 * we'll have to run at least one more complete loop.
2881			 */
2882			cycled_icloglock = true;
2883			xlog_state_do_iclog_callbacks(log, iclog, aborted);
2884
2885			xlog_state_clean_iclog(log, iclog);
2886			iclog = iclog->ic_next;
2887		} while (first_iclog != iclog);
2888
2889		did_callbacks |= cycled_icloglock;
2890
2891		if (repeats > 5000) {
2892			flushcnt += repeats;
2893			repeats = 0;
2894			xfs_warn(log->l_mp,
2895				"%s: possible infinite loop (%d iterations)",
2896				__func__, flushcnt);
2897		}
2898	} while (!ioerror && cycled_icloglock);
2899
2900	if (did_callbacks)
2901		xlog_state_callback_check_state(log);
2902
2903	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2904		wake_up_all(&log->l_flush_wait);
2905
2906	spin_unlock(&log->l_icloglock);
2907}
2908
2909
2910/*
2911 * Finish transitioning this iclog to the dirty state.
2912 *
2913 * Make sure that we completely execute this routine only when this is
2914 * the last call to the iclog.  There is a good chance that iclog flushes,
2915 * when we reach the end of the physical log, get turned into 2 separate
2916 * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2917 * routine.  By using the reference count bwritecnt, we guarantee that only
2918 * the second completion goes through.
2919 *
2920 * Callbacks could take time, so they are done outside the scope of the
2921 * global state machine log lock.
2922 */
2923STATIC void
2924xlog_state_done_syncing(
2925	struct xlog_in_core	*iclog,
2926	bool			aborted)
2927{
2928	struct xlog		*log = iclog->ic_log;
2929
2930	spin_lock(&log->l_icloglock);
2931
2932	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2933	       iclog->ic_state == XLOG_STATE_IOERROR);
2934	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2935
2936	/*
2937	 * If we got an error, either on the first buffer, or in the case of
2938	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2939	 * and none should ever be attempted to be written to disk
2940	 * again.
2941	 */
2942	if (iclog->ic_state != XLOG_STATE_IOERROR)
2943		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2944
2945	/*
2946	 * Someone could be sleeping prior to writing out the next
2947	 * iclog buffer, we wake them all, one will get to do the
2948	 * I/O, the others get to wait for the result.
2949	 */
2950	wake_up_all(&iclog->ic_write_wait);
2951	spin_unlock(&log->l_icloglock);
2952	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
2953}	/* xlog_state_done_syncing */
2954
2955
2956/*
2957 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2958 * sleep.  We wait on the flush queue on the head iclog as that should be
2959 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2960 * we will wait here and all new writes will sleep until a sync completes.
2961 *
2962 * The in-core logs are used in a circular fashion. They are not used
2963 * out-of-order even when an iclog past the head is free.
2964 *
2965 * return:
2966 *	* log_offset where xlog_write() can start writing into the in-core
2967 *		log's data space.
2968 *	* in-core log pointer to which xlog_write() should write.
2969 *	* boolean indicating this is a continued write to an in-core log.
2970 *		If this is the last write, then the in-core log's offset field
2971 *		needs to be incremented, depending on the amount of data which
2972 *		is copied.
2973 */
2974STATIC int
2975xlog_state_get_iclog_space(
2976	struct xlog		*log,
2977	int			len,
2978	struct xlog_in_core	**iclogp,
2979	struct xlog_ticket	*ticket,
2980	int			*continued_write,
2981	int			*logoffsetp)
2982{
2983	int		  log_offset;
2984	xlog_rec_header_t *head;
2985	xlog_in_core_t	  *iclog;
2986	int		  error;
2987
2988restart:
2989	spin_lock(&log->l_icloglock);
2990	if (XLOG_FORCED_SHUTDOWN(log)) {
2991		spin_unlock(&log->l_icloglock);
2992		return -EIO;
2993	}
2994
2995	iclog = log->l_iclog;
2996	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2997		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2998
2999		/* Wait for log writes to have flushed */
3000		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
3001		goto restart;
3002	}
3003
3004	head = &iclog->ic_header;
3005
3006	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
3007	log_offset = iclog->ic_offset;
3008
3009	/* On the 1st write to an iclog, figure out the lsn.  This works
3010	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
3011	 * committing to.  If the offset is set, that's how many blocks
3012	 * must be written.
3013	 */
3014	if (log_offset == 0) {
3015		ticket->t_curr_res -= log->l_iclog_hsize;
3016		xlog_tic_add_region(ticket,
3017				    log->l_iclog_hsize,
3018				    XLOG_REG_TYPE_LRHEADER);
3019		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
3020		head->h_lsn = cpu_to_be64(
3021			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
3022		ASSERT(log->l_curr_block >= 0);
3023	}
3024
3025	/* If there is enough room to write everything, then do it.  Otherwise,
3026	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
3027	 * bit is on, so this will get flushed out.  Don't update ic_offset
3028	 * until you know exactly how many bytes get copied.  Therefore, wait
3029	 * until later to update ic_offset.
3030	 *
3031	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
3032	 * can fit into remaining data section.
3033	 */
3034	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
3035		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3036
3037		/*
3038		 * If I'm the only one writing to this iclog, sync it to disk.
3039		 * We need to do an atomic compare and decrement here to avoid
3040		 * racing with concurrent atomic_dec_and_lock() calls in
3041		 * xlog_state_release_iclog() when there is more than one
3042		 * reference to the iclog.
3043		 */
3044		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
3045			/* we are the only one */
3046			spin_unlock(&log->l_icloglock);
3047			error = xlog_state_release_iclog(log, iclog);
3048			if (error)
3049				return error;
3050		} else {
3051			spin_unlock(&log->l_icloglock);
3052		}
3053		goto restart;
3054	}
3055
3056	/* Do we have enough room to write the full amount in the remainder
3057	 * of this iclog?  Or must we continue a write on the next iclog and
3058	 * mark this iclog as completely taken?  In the case where we switch
3059	 * iclogs (to mark it taken), this particular iclog will release/sync
3060	 * to disk in xlog_write().
3061	 */
3062	if (len <= iclog->ic_size - iclog->ic_offset) {
3063		*continued_write = 0;
3064		iclog->ic_offset += len;
3065	} else {
3066		*continued_write = 1;
3067		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3068	}
3069	*iclogp = iclog;
3070
3071	ASSERT(iclog->ic_offset <= iclog->ic_size);
3072	spin_unlock(&log->l_icloglock);
3073
3074	*logoffsetp = log_offset;
3075	return 0;
3076}	/* xlog_state_get_iclog_space */
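
/*
 * Worked example of the headroom check and the continued-write contract
 * above (illustrative numbers, assuming 32KB iclogs and
 * sizeof(xlog_op_header_t) == 12, i.e. a 24 byte minimum headroom):
 *
 *	ic_size = 32768, ic_offset = 32750:  18 bytes left < 24, so the
 *		iclog is switched to WANT_SYNC and we restart on the next one.
 *	ic_size = 32768, ic_offset = 32000, len = 500:  768 >= 500, so
 *		*continued_write = 0 and ic_offset += 500.
 *	ic_size = 32768, ic_offset = 32000, len = 2000:  768 < 2000, so
 *		*continued_write = 1 and the iclog is marked WANT_SYNC;
 *		xlog_write() copies what fits and continues in the next iclog.
 */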
3077
3078/* The first cnt-1 times through here we don't need to
3079 * move the grant write head because the permanent
3080 * reservation has reserved cnt times the unit amount.
3081 * Release part of the current permanent unit reservation and
3082 * reset the current reservation to be one unit's worth.  Also
3083 * move grant reservation head forward.
3084 */
3085STATIC void
3086xlog_regrant_reserve_log_space(
3087	struct xlog		*log,
3088	struct xlog_ticket	*ticket)
3089{
3090	trace_xfs_log_regrant_reserve_enter(log, ticket);
3091
3092	if (ticket->t_cnt > 0)
3093		ticket->t_cnt--;
3094
3095	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3096					ticket->t_curr_res);
3097	xlog_grant_sub_space(log, &log->l_write_head.grant,
3098					ticket->t_curr_res);
3099	ticket->t_curr_res = ticket->t_unit_res;
3100	xlog_tic_reset_res(ticket);
3101
3102	trace_xfs_log_regrant_reserve_sub(log, ticket);
3103
3104	/* just return if we still have some of the pre-reserved space */
3105	if (ticket->t_cnt > 0)
3106		return;
3107
3108	xlog_grant_add_space(log, &log->l_reserve_head.grant,
3109					ticket->t_unit_res);
3110
3111	trace_xfs_log_regrant_reserve_exit(log, ticket);
3112
3113	ticket->t_curr_res = ticket->t_unit_res;
3114	xlog_tic_reset_res(ticket);
3115}	/* xlog_regrant_reserve_log_space */
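
/*
 * Worked example (illustrative numbers): a permanent ticket with
 * t_unit_res = 1000 and t_cnt = 2 that consumed t_curr_res = 400 bytes of
 * its current unit by the time the transaction committed:
 *
 *	t_cnt:  2 -> 1
 *	reserve and write grant heads -= 400	(the space actually used)
 *	t_curr_res = 1000			(refill the current unit)
 *
 * Because t_cnt is still positive the refill is covered by the remaining
 * pre-reserved units and we return early.  Only once t_cnt reaches zero is
 * t_unit_res added back to the reserve grant head, which amounts to taking
 * a fresh reservation for the next transaction in the rolling sequence.
 */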
3116
3117
3118/*
3119 * Give back the space left from a reservation.
3120 *
3121 * All the information we need to make a correct determination of space left
3122 * is present.  For non-permanent reservations, things are quite easy.  The
3123 * count should have been decremented to zero.  We only need to deal with the
3124 * space remaining in the current reservation part of the ticket.  If the
3125 * ticket contains a permanent reservation, there may be left over space which
3126 * needs to be released.  A count of N means that N-1 refills of the current
3127 * reservation can be done before we need to ask for more space.  The first
3128 * one goes to fill up the first current reservation.  Once we run out of
3129 * space, the count will stay at zero and the only space remaining will be
3130 * in the current reservation field.
3131 */
3132STATIC void
3133xlog_ungrant_log_space(
3134	struct xlog		*log,
3135	struct xlog_ticket	*ticket)
3136{
3137	int	bytes;
3138
3139	if (ticket->t_cnt > 0)
3140		ticket->t_cnt--;
3141
3142	trace_xfs_log_ungrant_enter(log, ticket);
3143	trace_xfs_log_ungrant_sub(log, ticket);
3144
3145	/*
3146	 * If this is a permanent reservation ticket, we may be able to free
3147	 * up more space based on the remaining count.
3148	 */
3149	bytes = ticket->t_curr_res;
3150	if (ticket->t_cnt > 0) {
3151		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3152		bytes += ticket->t_unit_res * ticket->t_cnt;
3153	}
3154
3155	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3156	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3157
3158	trace_xfs_log_ungrant_exit(log, ticket);
3159
3160	xfs_log_space_wake(log->l_mp);
3161}
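
/*
 * Worked example (illustrative numbers): a permanent ticket with
 * t_unit_res = 1000, t_cnt = 3 and t_curr_res = 250 at ungrant time.
 * After the initial decrement t_cnt = 2, so:
 *
 *	bytes = t_curr_res + t_unit_res * t_cnt = 250 + 2000 = 2250
 *
 * and 2250 bytes are returned to both grant heads before the waiters are
 * woken.  A non-permanent ticket (t_cnt already zero) would hand back only
 * the 250 bytes left in its current reservation.
 */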
3162
3163/*
3164 * Flush iclog to disk if this is the last reference to the given iclog and
3165 * the WANT_SYNC bit is set.
3166 *
3167 * When this function is entered, the iclog is not necessarily in the
3168 * WANT_SYNC state.  It may be sitting around waiting to get filled.
3171 */
3172STATIC int
3173xlog_state_release_iclog(
3174	struct xlog		*log,
3175	struct xlog_in_core	*iclog)
3176{
3177	int		sync = 0;	/* do we sync? */
3178
3179	if (iclog->ic_state & XLOG_STATE_IOERROR)
3180		return -EIO;
3181
3182	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
3183	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
3184		return 0;
3185
3186	if (iclog->ic_state & XLOG_STATE_IOERROR) {
3187		spin_unlock(&log->l_icloglock);
3188		return -EIO;
3189	}
3190	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
3191	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
3192
3193	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
3194		/* update tail before writing to iclog */
3195		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
3196		sync++;
3197		iclog->ic_state = XLOG_STATE_SYNCING;
3198		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
3199		xlog_verify_tail_lsn(log, iclog, tail_lsn);
3200		/* cycle incremented when incrementing curr_block */
3201	}
3202	spin_unlock(&log->l_icloglock);
3203
3204	/*
3205	 * We let the log lock go, so it's possible that we hit a log I/O
3206	 * error or some other SHUTDOWN condition that marks the iclog
3207	 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
3208	 * this iclog has consistent data, so we ignore IOERROR
3209	 * flags after this point.
3210	 */
3211	if (sync)
3212		xlog_sync(log, iclog);
3213	return 0;
3214}	/* xlog_state_release_iclog */
3215
3216
3217/*
3218 * This routine will mark the current iclog in the ring as WANT_SYNC
3219 * and move the current iclog pointer to the next iclog in the ring.
3220 * When this routine is called from xlog_state_get_iclog_space(), the
3221 * exact size of the iclog has not yet been determined.  All we know is
3222 * that we have run out of space in this log record.
3223 */
3224STATIC void
3225xlog_state_switch_iclogs(
3226	struct xlog		*log,
3227	struct xlog_in_core	*iclog,
3228	int			eventual_size)
3229{
3230	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3231	if (!eventual_size)
3232		eventual_size = iclog->ic_offset;
3233	iclog->ic_state = XLOG_STATE_WANT_SYNC;
3234	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3235	log->l_prev_block = log->l_curr_block;
3236	log->l_prev_cycle = log->l_curr_cycle;
3237
3238	/* roll log?: ic_offset changed later */
3239	log->l_curr_block += BTOBB(eventual_size) + BTOBB(log->l_iclog_hsize);
3240
3241	/* Round up to next log-sunit */
3242	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3243	    log->l_mp->m_sb.sb_logsunit > 1) {
3244		uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3245		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3246	}
3247
3248	if (log->l_curr_block >= log->l_logBBsize) {
3249		/*
3250		 * Rewind the current block before the cycle is bumped to make
3251		 * sure that the combined LSN never transiently moves forward
3252		 * when the log wraps to the next cycle. This is to support the
3253		 * unlocked sample of these fields from xlog_valid_lsn(). Most
3254		 * other cases should acquire l_icloglock.
3255		 */
3256		log->l_curr_block -= log->l_logBBsize;
3257		ASSERT(log->l_curr_block >= 0);
3258		smp_wmb();
3259		log->l_curr_cycle++;
3260		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3261			log->l_curr_cycle++;
3262	}
3263	ASSERT(iclog == log->l_iclog);
3264	log->l_iclog = iclog->ic_next;
3265}	/* xlog_state_switch_iclogs */
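
/*
 * Example of the stripe unit roundup above (illustrative numbers): with
 * sb_logsunit = 32768 bytes, sunit_bb = BTOBB(32768) = 64 basic blocks.
 * If the iclog just filled advanced l_curr_block to 1000, then
 * roundup(1000, 64) = 1024, so up to 63 blocks can be "lost" per log
 * record in exchange for stripe-aligned log writes.  The reservation
 * calculated by xfs_log_calc_unit_res() below includes this roundoff so
 * that tickets can never run the log out of space because of it.
 */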
3266
3267/*
3268 * Write out all data in the in-core log as of this exact moment in time.
3269 *
3270 * Data may be written to the in-core log during this call.  However,
3271 * we don't guarantee this data will be written out.  A change from past
3272 * implementation means this routine will *not* write out zero length LRs.
3273 *
3274 * Basically, we try and perform an intelligent scan of the in-core logs.
3275 * If we determine there is no flushable data, we just return.  There is no
3276 * flushable data if:
3277 *
3278 *	1. the current iclog is active and has no data; the previous iclog
3279 *		is in the active or dirty state.
3280 *	2. the current iclog is dirty, and the previous iclog is in the
3281 *		active or dirty state.
3282 *
3283 * We may sleep if:
3284 *
3285 *	1. the current iclog is not in the active nor dirty state.
3286 *	2. the current iclog is dirty, and the previous iclog is not in the
3287 *		active nor dirty state.
3288 *	3. the current iclog is active, and there is another thread writing
3289 *		to this particular iclog.
3290 *	4. a) the current iclog is active and has no other writers
3291 *	   b) when we return from flushing out this iclog, it is still
3292 *		not in the active nor dirty state.
3293 */
3294int
3295xfs_log_force(
3296	struct xfs_mount	*mp,
3297	uint			flags)
3298{
3299	struct xlog		*log = mp->m_log;
3300	struct xlog_in_core	*iclog;
3301	xfs_lsn_t		lsn;
3302
3303	XFS_STATS_INC(mp, xs_log_force);
3304	trace_xfs_log_force(mp, 0, _RET_IP_);
3305
3306	xlog_cil_force(log);
3307
3308	spin_lock(&log->l_icloglock);
3309	iclog = log->l_iclog;
3310	if (iclog->ic_state & XLOG_STATE_IOERROR)
3311		goto out_error;
3312
3313	if (iclog->ic_state == XLOG_STATE_DIRTY ||
3314	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
3315	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3316		/*
3317		 * If the head is dirty or (active and empty), then we need to
3318		 * look at the previous iclog.
3319		 *
3320		 * If the previous iclog is active or dirty we are done.  There
3321		 * is nothing to sync out. Otherwise, we attach ourselves to the
3322		 * previous iclog and go to sleep.
3323		 */
3324		iclog = iclog->ic_prev;
3325		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
3326		    iclog->ic_state == XLOG_STATE_DIRTY)
3327			goto out_unlock;
3328	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3329		if (atomic_read(&iclog->ic_refcnt) == 0) {
3330			/*
3331			 * We are the only one with access to this iclog.
3332			 *
3333			 * Flush it out now.  There should be a roundoff of zero
3334			 * to show that someone has already taken care of the
3335			 * roundoff from the previous sync.
3336			 */
3337			atomic_inc(&iclog->ic_refcnt);
3338			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3339			xlog_state_switch_iclogs(log, iclog, 0);
3340			spin_unlock(&log->l_icloglock);
3341
3342			if (xlog_state_release_iclog(log, iclog))
3343				return -EIO;
3344
3345			spin_lock(&log->l_icloglock);
3346			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
3347			    iclog->ic_state == XLOG_STATE_DIRTY)
3348				goto out_unlock;
3349		} else {
3350			/*
3351			 * Someone else is writing to this iclog.
3352			 *
3353			 * Use its call to flush out the data.  However, the
3354			 * other thread may not force out this LR, so we mark
3355			 * it WANT_SYNC.
3356			 */
3357			xlog_state_switch_iclogs(log, iclog, 0);
3358		}
3359	} else {
3360		/*
3361		 * If the head iclog is not active nor dirty, we just attach
3362		 * ourselves to the head and go to sleep if necessary.
3363		 */
3364		;
3365	}
3366
3367	if (!(flags & XFS_LOG_SYNC))
3368		goto out_unlock;
3369
3370	if (iclog->ic_state & XLOG_STATE_IOERROR)
3371		goto out_error;
3372	XFS_STATS_INC(mp, xs_log_force_sleep);
3373	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3374	if (iclog->ic_state & XLOG_STATE_IOERROR)
3375		return -EIO;
3376	return 0;
3377
3378out_unlock:
3379	spin_unlock(&log->l_icloglock);
3380	return 0;
3381out_error:
3382	spin_unlock(&log->l_icloglock);
3383	return -EIO;
3384}
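
/*
 * Typical usage (sketch): callers that need everything committed so far to
 * be stable on disk issue a synchronous force, e.g. from fsync-like paths:
 *
 *	error = xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * With XFS_LOG_SYNC the caller sleeps on ic_force_wait until the iclog
 * write completes; without it the force only kicks off the I/O and
 * returns immediately.
 */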
3385
3386static int
3387__xfs_log_force_lsn(
3388	struct xfs_mount	*mp,
3389	xfs_lsn_t		lsn,
3390	uint			flags,
3391	int			*log_flushed,
3392	bool			already_slept)
3393{
3394	struct xlog		*log = mp->m_log;
3395	struct xlog_in_core	*iclog;
3396
3397	spin_lock(&log->l_icloglock);
3398	iclog = log->l_iclog;
3399	if (iclog->ic_state & XLOG_STATE_IOERROR)
3400		goto out_error;
3401
3402	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3403		iclog = iclog->ic_next;
3404		if (iclog == log->l_iclog)
3405			goto out_unlock;
3406	}
3407
3408	if (iclog->ic_state == XLOG_STATE_DIRTY)
3409		goto out_unlock;
3410
3411	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3412		/*
3413		 * We sleep here if we haven't already slept (e.g. this is the
3414		 * first time we've looked at the correct iclog buf) and the
3415		 * buffer before us is going to be sync'ed.  The reason for this
3416		 * is that if we are doing sync transactions here, by waiting
3417		 * for the previous I/O to complete, we can allow a few more
3418		 * transactions into this iclog before we close it down.
3419		 *
3420		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3421		 * refcnt so we can release the log (which drops the ref count).
3422		 * The state switch keeps new transaction commits from using
3423		 * this buffer.  When the current commits finish writing into
3424		 * the buffer, the refcount will drop to zero and the buffer
3425		 * will go out then.
3426		 */
3427		if (!already_slept &&
3428		    (iclog->ic_prev->ic_state &
3429		     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3430			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3431
3432			XFS_STATS_INC(mp, xs_log_force_sleep);
3433
3434			xlog_wait(&iclog->ic_prev->ic_write_wait,
3435					&log->l_icloglock);
3436			return -EAGAIN;
3437		}
3438		atomic_inc(&iclog->ic_refcnt);
3439		xlog_state_switch_iclogs(log, iclog, 0);
3440		spin_unlock(&log->l_icloglock);
3441		if (xlog_state_release_iclog(log, iclog))
3442			return -EIO;
3443		if (log_flushed)
3444			*log_flushed = 1;
3445		spin_lock(&log->l_icloglock);
3446	}
3447
3448	if (!(flags & XFS_LOG_SYNC) ||
3449	    (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY)))
3450		goto out_unlock;
3451
3452	if (iclog->ic_state & XLOG_STATE_IOERROR)
3453		goto out_error;
3454
3455	XFS_STATS_INC(mp, xs_log_force_sleep);
3456	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3457	if (iclog->ic_state & XLOG_STATE_IOERROR)
3458		return -EIO;
3459	return 0;
3460
3461out_unlock:
3462	spin_unlock(&log->l_icloglock);
3463	return 0;
3464out_error:
3465	spin_unlock(&log->l_icloglock);
3466	return -EIO;
3467}
3468
3469/*
3470 * Force the in-core log to disk for a specific LSN.
3471 *
3472 * Find in-core log with lsn.
3473 *	If it is in the DIRTY state, just return.
3474 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3475 *		state and go to sleep or return.
3476 *	If it is in any other state, go to sleep or return.
3477 *
3478 * Synchronous forces are implemented with a wait queue.  All callers trying
3479 * to force a given lsn to disk must wait on the queue attached to the
3480 * specific in-core log.  When the given in-core log finally completes its
3481 * to disk, that thread will wake up all threads waiting on the queue.
3482 */
3483int
3484xfs_log_force_lsn(
3485	struct xfs_mount	*mp,
3486	xfs_lsn_t		lsn,
3487	uint			flags,
3488	int			*log_flushed)
3489{
3490	int			ret;
3491	ASSERT(lsn != 0);
3492
3493	XFS_STATS_INC(mp, xs_log_force);
3494	trace_xfs_log_force(mp, lsn, _RET_IP_);
3495
3496	lsn = xlog_cil_force_lsn(mp->m_log, lsn);
3497	if (lsn == NULLCOMMITLSN)
3498		return 0;
3499
3500	ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
3501	if (ret == -EAGAIN)
3502		ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
3503	return ret;
3504}
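
/*
 * Usage sketch: inode fsync only needs its own last commit on disk, so it
 * forces the LSN stamped in the inode log item rather than the whole log,
 * roughly (simplified from xfs_file_fsync(); locking and error handling
 * omitted):
 *
 *	xfs_lsn_t	lsn = ip->i_itemp->ili_last_lsn;
 *
 *	if (lsn)
 *		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
 */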
3505
3506/*
3507 * Called when we want to mark the current iclog as being ready to sync to
3508 * disk.
3509 */
3510STATIC void
3511xlog_state_want_sync(
3512	struct xlog		*log,
3513	struct xlog_in_core	*iclog)
3514{
3515	assert_spin_locked(&log->l_icloglock);
3516
3517	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3518		xlog_state_switch_iclogs(log, iclog, 0);
3519	} else {
3520		ASSERT(iclog->ic_state &
3521			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3522	}
3523}
3524
3525
3526/*****************************************************************************
3527 *
3528 *		TICKET functions
3529 *
3530 *****************************************************************************
3531 */
3532
3533/*
3534 * Free a used ticket when its refcount falls to zero.
3535 */
3536void
3537xfs_log_ticket_put(
3538	xlog_ticket_t	*ticket)
3539{
3540	ASSERT(atomic_read(&ticket->t_ref) > 0);
3541	if (atomic_dec_and_test(&ticket->t_ref))
3542		kmem_zone_free(xfs_log_ticket_zone, ticket);
3543}
3544
3545xlog_ticket_t *
3546xfs_log_ticket_get(
3547	xlog_ticket_t	*ticket)
3548{
3549	ASSERT(atomic_read(&ticket->t_ref) > 0);
3550	atomic_inc(&ticket->t_ref);
3551	return ticket;
3552}
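
/*
 * Reference counting sketch: any code that stashes a ticket beyond the
 * lifetime of the reference it was handed takes its own.  For example,
 * xfs_trans_dup() keeps the ticket alive for the duplicated transaction:
 *
 *	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 *
 * and the matching xfs_log_ticket_put() frees the ticket once the last
 * reference is dropped.
 */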
3553
3554/*
3555 * Figure out the total log space unit (in bytes) that would be
3556 * required for a log ticket.
3557 */
3558int
3559xfs_log_calc_unit_res(
3560	struct xfs_mount	*mp,
3561	int			unit_bytes)
3562{
3563	struct xlog		*log = mp->m_log;
3564	int			iclog_space;
3565	uint			num_headers;
3566
3567	/*
3568	 * Permanent reservations have up to 'cnt'-1 active log operations
3569	 * in the log.  A unit in this case is the amount of space for one
3570	 * of these log operations.  Normal reservations have a cnt of 1
3571	 * and their unit amount is the total amount of space required.
3572	 *
3573	 * The following lines of code account for non-transaction data
3574	 * which occupy space in the on-disk log.
3575	 *
3576	 * Normal form of a transaction is:
3577	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3578	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3579	 *
3580	 * We need to account for all the leadup data and trailer data
3581	 * around the transaction data.
3582	 * And then we need to account for the worst case in terms of using
3583	 * more space.
3584	 * The worst case will happen if:
3585	 * - the placement of the transaction happens to be such that the
3586	 *   roundoff is at its maximum
3587	 * - the transaction data is synced before the commit record is synced
3588	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3589 *   Therefore the commit record is in its own Log Record.
3590 *   This can happen because the commit record is passed to
3591 *   xlog_write() as its own region.
3592	 *   This then means that in the worst case, roundoff can happen for
3593	 *   the commit-rec as well.
3594	 *   The commit-rec is smaller than padding in this scenario and so it is
3595	 *   not added separately.
3596	 */
3597
3598	/* for trans header */
3599	unit_bytes += sizeof(xlog_op_header_t);
3600	unit_bytes += sizeof(xfs_trans_header_t);
3601
3602	/* for start-rec */
3603	unit_bytes += sizeof(xlog_op_header_t);
3604
3605	/*
3606	 * for LR headers - the space for data in an iclog is the size minus
3607	 * the space used for the headers. If we use the iclog size, then we
3608	 * undercalculate the number of headers required.
3609	 *
3610	 * Furthermore - the addition of op headers for split-recs might
3611	 * increase the space required enough to require more log and op
3612	 * headers, so take that into account too.
3613	 *
3614	 * IMPORTANT: This reservation makes the assumption that if this
3615	 * transaction is the first in an iclog and hence has the LR headers
3616	 * accounted to it, then the remaining space in the iclog is
3617	 * exclusively for this transaction.  i.e. if the transaction is larger
3618	 * than the iclog, it will be the only thing in that iclog.
3619	 * Fundamentally, this means we must pass the entire log vector to
3620	 * xlog_write to guarantee this.
3621	 */
3622	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3623	num_headers = howmany(unit_bytes, iclog_space);
3624
3625	/* for split-recs - ophdrs added when data split over LRs */
3626	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3627
3628	/* add extra header reservations if we overrun */
3629	while (!num_headers ||
3630	       howmany(unit_bytes, iclog_space) > num_headers) {
3631		unit_bytes += sizeof(xlog_op_header_t);
3632		num_headers++;
3633	}
3634	unit_bytes += log->l_iclog_hsize * num_headers;
3635
3636	/* for commit-rec LR header - note: padding will subsume the ophdr */
3637	unit_bytes += log->l_iclog_hsize;
3638
3639	/* roundoff padding for transaction data and for the commit record */
3640	if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
3641		/* log su roundoff */
3642		unit_bytes += 2 * mp->m_sb.sb_logsunit;
3643	} else {
3644		/* BB roundoff */
3645		unit_bytes += 2 * BBSIZE;
3646	}
3647
3648	return unit_bytes;
3649}
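
/*
 * Worked example (illustrative numbers: 32KB iclogs, 512 byte iclog
 * headers, a v2 log with sb_logsunit <= 1, sizeof(xlog_op_header_t) == 12
 * and sizeof(xfs_trans_header_t) == 16), starting from unit_bytes = 10000:
 *
 *	trans header:	10000 + 12 + 16		= 10028
 *	start record:	10028 + 12		= 10040
 *	iclog_space	= 32768 - 512		= 32256
 *	num_headers	= howmany(10040, 32256)	= 1
 *	split-rec oph:	10040 + 12 * 1		= 10052
 *	LR headers:	10052 + 512 * 1		= 10564
 *	commit-rec LR:	10564 + 512		= 11076
 *	BB roundoff:	11076 + 2 * 512		= 12100
 *
 * so this ticket would reserve 12100 bytes of log space per unit.
 */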
3650
3651/*
3652 * Allocate and initialise a new log ticket.
3653 */
3654struct xlog_ticket *
3655xlog_ticket_alloc(
3656	struct xlog		*log,
3657	int			unit_bytes,
3658	int			cnt,
3659	char			client,
3660	bool			permanent,
3661	xfs_km_flags_t		alloc_flags)
3662{
3663	struct xlog_ticket	*tic;
3664	int			unit_res;
3665
3666	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3667	if (!tic)
3668		return NULL;
3669
3670	unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3671
3672	atomic_set(&tic->t_ref, 1);
3673	tic->t_task		= current;
3674	INIT_LIST_HEAD(&tic->t_queue);
3675	tic->t_unit_res		= unit_res;
3676	tic->t_curr_res		= unit_res;
3677	tic->t_cnt		= cnt;
3678	tic->t_ocnt		= cnt;
3679	tic->t_tid		= prandom_u32();
3680	tic->t_clientid		= client;
3681	tic->t_flags		= XLOG_TIC_INITED;
3682	if (permanent)
3683		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3684
3685	xlog_tic_reset_res(tic);
3686
3687	return tic;
3688}
3689
3690
3691/******************************************************************************
3692 *
3693 *		Log debug routines
3694 *
3695 ******************************************************************************
3696 */
3697#if defined(DEBUG)
3698/*
3699 * Make sure that the destination ptr is within the valid data region of
3700 * one of the iclogs.  This uses backup pointers stored in a different
3701 * part of the log in case we trash the log structure.
3702 */
3703STATIC void
3704xlog_verify_dest_ptr(
3705	struct xlog	*log,
3706	void		*ptr)
3707{
3708	int i;
3709	int good_ptr = 0;
3710
3711	for (i = 0; i < log->l_iclog_bufs; i++) {
3712		if (ptr >= log->l_iclog_bak[i] &&
3713		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3714			good_ptr++;
3715	}
3716
3717	if (!good_ptr)
3718		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3719}
3720
3721/*
3722 * Check to make sure the grant write head didn't just overlap the tail.  If
3723 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3724 * the cycles differ by exactly one and check the byte count.
3725 *
3726 * This check is run unlocked, so can give false positives. Rather than assert
3727 * on failures, use a warn-once flag and a panic tag to allow the admin to
3728 * determine if they want to panic the machine when such an error occurs. For
3729 * debug kernels this will have the same effect as using an assert but, unlike
3730 * an assert, it can be turned off at runtime.
3731 */
3732STATIC void
3733xlog_verify_grant_tail(
3734	struct xlog	*log)
3735{
3736	int		tail_cycle, tail_blocks;
3737	int		cycle, space;
3738
3739	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3740	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3741	if (tail_cycle != cycle) {
3742		if (cycle - 1 != tail_cycle &&
3743		    !(log->l_flags & XLOG_TAIL_WARN)) {
3744			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3745				"%s: cycle - 1 != tail_cycle", __func__);
3746			log->l_flags |= XLOG_TAIL_WARN;
3747		}
3748
3749		if (space > BBTOB(tail_blocks) &&
3750		    !(log->l_flags & XLOG_TAIL_WARN)) {
3751			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3752				"%s: space > BBTOB(tail_blocks)", __func__);
3753			log->l_flags |= XLOG_TAIL_WARN;
3754		}
3755	}
3756}
3757
3758/* check if it will fit */
3759STATIC void
3760xlog_verify_tail_lsn(
3761	struct xlog		*log,
3762	struct xlog_in_core	*iclog,
3763	xfs_lsn_t		tail_lsn)
3764{
3765	int	blocks;
3766
3767	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3768		blocks = log->l_logBBsize -
3769				(log->l_prev_block - BLOCK_LSN(tail_lsn));
3770		if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
3771			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3772	} else {
3773		ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);
3774
3775		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3776			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3777
3778		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3779		if (blocks < BTOBB(iclog->ic_offset) + 1)
3780			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3781	}
3782}	/* xlog_verify_tail_lsn */
3783
3784/*
3785 * Perform a number of checks on the iclog before writing to disk.
3786 *
3787 * 1. Make sure the iclogs are still circular
3788 * 2. Make sure we have a good magic number
3789 * 3. Make sure we don't have magic numbers in the data
3790 * 4. Check fields of each log operation header for:
3791 *	A. Valid client identifier
3792 *	B. tid ptr value falls in valid ptr space (user space code)
3793 *	C. Length in log record header is correct according to the
3794 *		individual operation headers within the record.
3795 * 5. When a bwrite will occur within 5 blocks of the front of the physical
3796 *	log, check the preceding blocks of the physical log to make sure all
3797 *	the cycle numbers agree with the current cycle number.
3798 */
3799STATIC void
3800xlog_verify_iclog(
3801	struct xlog		*log,
3802	struct xlog_in_core	*iclog,
3803	int			count)
3804{
3805	xlog_op_header_t	*ophead;
3806	xlog_in_core_t		*icptr;
3807	xlog_in_core_2_t	*xhdr;
3808	void			*base_ptr, *ptr, *p;
3809	ptrdiff_t		field_offset;
3810	uint8_t			clientid;
3811	int			len, i, j, k, op_len;
3812	int			idx;
3813
3814	/* check validity of iclog pointers */
3815	spin_lock(&log->l_icloglock);
3816	icptr = log->l_iclog;
3817	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3818		ASSERT(icptr);
3819
3820	if (icptr != log->l_iclog)
3821		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3822	spin_unlock(&log->l_icloglock);
3823
3824	/* check log magic numbers */
3825	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3826		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3827
3828	base_ptr = ptr = &iclog->ic_header;
3829	p = &iclog->ic_header;
3830	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3831		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3832			xfs_emerg(log->l_mp, "%s: unexpected magic num",
3833				__func__);
3834	}
3835
3836	/* check fields */
3837	len = be32_to_cpu(iclog->ic_header.h_num_logops);
3838	base_ptr = ptr = iclog->ic_datap;
3839	ophead = ptr;
3840	xhdr = iclog->ic_data;
3841	for (i = 0; i < len; i++) {
3842		ophead = ptr;
3843
3844		/* clientid is only 1 byte */
3845		p = &ophead->oh_clientid;
3846		field_offset = p - base_ptr;
3847		if (field_offset & 0x1ff) {
3848			clientid = ophead->oh_clientid;
3849		} else {
3850			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
3851			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3852				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3853				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3854				clientid = xlog_get_client_id(
3855					xhdr[j].hic_xheader.xh_cycle_data[k]);
3856			} else {
3857				clientid = xlog_get_client_id(
3858					iclog->ic_header.h_cycle_data[idx]);
3859			}
3860		}
3861		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3862			xfs_warn(log->l_mp,
3863				"%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
3864				__func__, clientid, ophead,
3865				(unsigned long)field_offset);
3866
3867		/* check length */
3868		p = &ophead->oh_len;
3869		field_offset = p - base_ptr;
3870		if (field_offset & 0x1ff) {
3871			op_len = be32_to_cpu(ophead->oh_len);
3872		} else {
3873			idx = BTOBBT((uintptr_t)&ophead->oh_len -
3874				    (uintptr_t)iclog->ic_datap);
3875			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3876				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3877				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3878				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3879			} else {
3880				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3881			}
3882		}
3883		ptr += sizeof(xlog_op_header_t) + op_len;
3884	}
3885}	/* xlog_verify_iclog */
3886#endif
3887
3888/*
3889 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
3890 */
3891STATIC int
3892xlog_state_ioerror(
3893	struct xlog	*log)
3894{
3895	xlog_in_core_t	*iclog, *ic;
3896
3897	iclog = log->l_iclog;
3898	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
3899		/*
3900		 * Mark all the incore logs IOERROR.
3901		 * From now on, no log flushes will result.
3902		 */
3903		ic = iclog;
3904		do {
3905			ic->ic_state = XLOG_STATE_IOERROR;
3906			ic = ic->ic_next;
3907		} while (ic != iclog);
3908		return 0;
3909	}
3910	/*
3911	 * Return non-zero if the state transition has already happened.
3912	 */
3913	return 1;
3914}
3915
3916/*
3917 * This is called from xfs_force_shutdown, when we're forcibly
3918 * shutting down the filesystem, typically because of an IO error.
3919 * Our main objectives here are to make sure that:
3920 *	a. if !logerror, flush the logs to disk. Anything modified
3921 *	   after this is ignored.
3922 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
3923 *	   parties to find out, 'atomically'.
3924 *	c. those who're sleeping on log reservations, pinned objects and
3925 *	    other resources get woken up and told the bad news.
3926 *	d. nothing new gets queued up after (b) and (c) are done.
3927 *
3928 * Note: for the !logerror case we need to flush the regions held in memory out
3929 * to disk first. This needs to be done before the log is marked as shutdown,
3930 * otherwise the iclog writes will fail.
3931 */
3932int
3933xfs_log_force_umount(
3934	struct xfs_mount	*mp,
3935	int			logerror)
3936{
3937	struct xlog	*log;
3938	int		retval;
3939
3940	log = mp->m_log;
3941
3942	/*
3943	 * If this happens during log recovery, don't worry about
3944	 * locking; the log isn't open for business yet.
3945	 */
3946	if (!log ||
3947	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
3948		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3949		if (mp->m_sb_bp)
3950			mp->m_sb_bp->b_flags |= XBF_DONE;
3951		return 0;
3952	}
3953
3954	/*
3955	 * Somebody could've already done the hard work for us.
3956	 * No need to get locks for this.
3957	 */
3958	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3959		ASSERT(XLOG_FORCED_SHUTDOWN(log));
3960		return 1;
3961	}
3962
3963	/*
3964	 * Flush all the completed transactions to disk before marking the log
3965	 * being shut down. We need to do it in this order to ensure that
3966	 * completed operations are safely on disk before we shut down, and that
3967	 * we don't have to issue any buffer IO after the shutdown flags are set
3968	 * to guarantee this.
3969	 */
3970	if (!logerror)
3971		xfs_log_force(mp, XFS_LOG_SYNC);
3972
3973	/*
3974	 * Mark the filesystem and the log as being in a shutdown state and
3975	 * wake everybody up to tell them the bad news.
3976	 */
3977	spin_lock(&log->l_icloglock);
3978	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3979	if (mp->m_sb_bp)
3980		mp->m_sb_bp->b_flags |= XBF_DONE;
3981
3982	/*
3983	 * Mark the log and the iclogs with IO error flags to prevent any
3984	 * further log IO from being issued or completed.
3985	 */
3986	log->l_flags |= XLOG_IO_ERROR;
3987	retval = xlog_state_ioerror(log);
3988	spin_unlock(&log->l_icloglock);
3989
3990	/*
3991	 * We don't want anybody waiting for log reservations after this. That
3992	 * means we have to wake up everybody queued up on reserveq as well as
3993	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3994	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3995	 * action is protected by the grant locks.
3996	 */
3997	xlog_grant_head_wake_all(&log->l_reserve_head);
3998	xlog_grant_head_wake_all(&log->l_write_head);
3999
4000	/*
4001	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
4002	 * as if the log writes were completed. The abort handling in the log
4003	 * item committed callback functions will do this again under lock to
4004	 * avoid races.
4005	 */
4006	spin_lock(&log->l_cilp->xc_push_lock);
4007	wake_up_all(&log->l_cilp->xc_commit_wait);
4008	spin_unlock(&log->l_cilp->xc_push_lock);
4009	xlog_state_do_callback(log, true, NULL);
4010
4011#ifdef XFSERRORDEBUG
4012	{
4013		xlog_in_core_t	*iclog;
4014
4015		spin_lock(&log->l_icloglock);
4016		iclog = log->l_iclog;
4017		do {
4018			ASSERT(iclog->ic_callback == 0);
4019			iclog = iclog->ic_next;
4020		} while (iclog != log->l_iclog);
4021		spin_unlock(&log->l_icloglock);
4022	}
4023#endif
4024	/* return non-zero if log IOERROR transition had already happened */
4025	return retval;
4026}
4027
4028STATIC int
4029xlog_iclogs_empty(
4030	struct xlog	*log)
4031{
4032	xlog_in_core_t	*iclog;
4033
4034	iclog = log->l_iclog;
4035	do {
4036		/* endianness does not matter here, zero is zero in
4037		 * any language.
4038		 */
4039		if (iclog->ic_header.h_num_logops)
4040			return 0;
4041		iclog = iclog->ic_next;
4042	} while (iclog != log->l_iclog);
4043	return 1;
4044}
4045
4046/*
4047 * Verify that an LSN stamped into a piece of metadata is valid. This is
4048 * intended for use in read verifiers on v5 superblocks.
4049 */
4050bool
4051xfs_log_check_lsn(
4052	struct xfs_mount	*mp,
4053	xfs_lsn_t		lsn)
4054{
4055	struct xlog		*log = mp->m_log;
4056	bool			valid;
4057
4058	/*
4059	 * norecovery mode skips mount-time log processing and unconditionally
4060	 * resets the in-core LSN. We can't validate in this mode, but
4061	 * modifications are not allowed anyway, so just return true.
4062	 */
4063	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
4064		return true;
4065
4066	/*
4067	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
4068	 * handled by recovery and thus safe to ignore here.
4069	 */
4070	if (lsn == NULLCOMMITLSN)
4071		return true;
4072
4073	valid = xlog_valid_lsn(mp->m_log, lsn);
4074
4075	/* warn the user about what's gone wrong before verifier failure */
4076	if (!valid) {
4077		spin_lock(&log->l_icloglock);
4078		xfs_warn(mp,
4079"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
4080"Please unmount and run xfs_repair (>= v4.3) to resolve.",
4081			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
4082			 log->l_curr_cycle, log->l_curr_block);
4083		spin_unlock(&log->l_icloglock);
4084	}
4085
4086	return valid;
4087}
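
/*
 * Usage sketch: v5 read verifiers call this on the LSN stamped into on-disk
 * metadata before accepting a buffer, e.g. (simplified from the AGF
 * verifier):
 *
 *	if (xfs_sb_version_hascrc(&mp->m_sb) &&
 *	    !xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
 *		return __this_address;
 *
 * A metadata LSN ahead of the current head means the log was rewritten
 * (e.g. by an older mkfs or repair) and replay can no longer be trusted.
 */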
4088
4089bool
4090xfs_log_in_recovery(
4091	struct xfs_mount	*mp)
4092{
4093	struct xlog		*log = mp->m_log;
4094
4095	return log->l_flags & XLOG_ACTIVE_RECOVERY;
4096}