Linux v3.5.6: fs/xfs/xfs_log.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_log.h"
  22#include "xfs_trans.h"
  23#include "xfs_sb.h"
  24#include "xfs_ag.h"
  25#include "xfs_mount.h"
  26#include "xfs_error.h"
  27#include "xfs_log_priv.h"
  28#include "xfs_buf_item.h"
  29#include "xfs_bmap_btree.h"
  30#include "xfs_alloc_btree.h"
  31#include "xfs_ialloc_btree.h"
  32#include "xfs_log_recover.h"
  33#include "xfs_trans_priv.h"
  34#include "xfs_dinode.h"
  35#include "xfs_inode.h"
  36#include "xfs_trace.h"
  37
  38kmem_zone_t	*xfs_log_ticket_zone;
  39
  40/* Local miscellaneous function prototypes */
  41STATIC int
  42xlog_commit_record(
  43	struct xlog		*log,
  44	struct xlog_ticket	*ticket,
  45	struct xlog_in_core	**iclog,
  46	xfs_lsn_t		*commitlsnp);
  47
  48STATIC xlog_t *  xlog_alloc_log(xfs_mount_t	*mp,
  49				xfs_buftarg_t	*log_target,
  50				xfs_daddr_t	blk_offset,
  51				int		num_bblks);
  52STATIC int
  53xlog_space_left(
  54	struct xlog		*log,
  55	atomic64_t		*head);
  56STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
  57STATIC void	 xlog_dealloc_log(xlog_t *log);
  58
  59/* local state machine functions */
  60STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
  61STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
  62STATIC int  xlog_state_get_iclog_space(xlog_t		*log,
  63				       int		len,
  64				       xlog_in_core_t	**iclog,
  65				       xlog_ticket_t	*ticket,
  66				       int		*continued_write,
  67				       int		*logoffsetp);
  68STATIC int  xlog_state_release_iclog(xlog_t		*log,
  69				     xlog_in_core_t	*iclog);
  70STATIC void xlog_state_switch_iclogs(xlog_t		*log,
  71				     xlog_in_core_t *iclog,
  72				     int		eventual_size);
  73STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
  74
  75STATIC void
  76xlog_grant_push_ail(
  77	struct xlog	*log,
  78	int		need_bytes);
  79STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
  80					   xlog_ticket_t *ticket);
  81STATIC void xlog_ungrant_log_space(xlog_t	 *log,
  82				   xlog_ticket_t *ticket);
  83
  84#if defined(DEBUG)
  85STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
  86STATIC void
  87xlog_verify_grant_tail(
  88	struct xlog	*log);
  89STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
  90				  int count, boolean_t syncing);
  91STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
  92				     xfs_lsn_t tail_lsn);
  93#else
  94#define xlog_verify_dest_ptr(a,b)
  95#define xlog_verify_grant_tail(a)
  96#define xlog_verify_iclog(a,b,c,d)
  97#define xlog_verify_tail_lsn(a,b,c)
  98#endif
  99
 100STATIC int	xlog_iclogs_empty(xlog_t *log);
 101
 102static void
 103xlog_grant_sub_space(
 104	struct xlog		*log,
 105	atomic64_t		*head,
 106	int			bytes)
 107{
 108	int64_t	head_val = atomic64_read(head);
 109	int64_t new, old;
 110
 111	do {
 112		int	cycle, space;
 113
 114		xlog_crack_grant_head_val(head_val, &cycle, &space);
 115
 116		space -= bytes;
 117		if (space < 0) {
 118			space += log->l_logsize;
 119			cycle--;
 120		}
 121
 122		old = head_val;
 123		new = xlog_assign_grant_head_val(cycle, space);
 124		head_val = atomic64_cmpxchg(head, old, new);
 125	} while (head_val != old);
 126}
 127
 128static void
 129xlog_grant_add_space(
 130	struct xlog		*log,
 131	atomic64_t		*head,
 132	int			bytes)
 133{
 134	int64_t	head_val = atomic64_read(head);
 135	int64_t new, old;
 136
 137	do {
 138		int		tmp;
 139		int		cycle, space;
 140
 141		xlog_crack_grant_head_val(head_val, &cycle, &space);
 142
 143		tmp = log->l_logsize - space;
 144		if (tmp > bytes)
 145			space += bytes;
 146		else {
 147			space = bytes - tmp;
 148			cycle++;
 149		}
 150
 151		old = head_val;
 152		new = xlog_assign_grant_head_val(cycle, space);
 153		head_val = atomic64_cmpxchg(head, old, new);
 154	} while (head_val != old);
 155}
 156
 157STATIC void
 158xlog_grant_head_init(
 159	struct xlog_grant_head	*head)
 160{
 161	xlog_assign_grant_head(&head->grant, 1, 0);
 162	INIT_LIST_HEAD(&head->waiters);
 163	spin_lock_init(&head->lock);
 164}
 165
 166STATIC void
 167xlog_grant_head_wake_all(
 168	struct xlog_grant_head	*head)
 169{
 170	struct xlog_ticket	*tic;
 171
 172	spin_lock(&head->lock);
 173	list_for_each_entry(tic, &head->waiters, t_queue)
 174		wake_up_process(tic->t_task);
 175	spin_unlock(&head->lock);
 176}
 177
 178static inline int
 179xlog_ticket_reservation(
 180	struct xlog		*log,
 181	struct xlog_grant_head	*head,
 182	struct xlog_ticket	*tic)
 183{
 184	if (head == &log->l_write_head) {
 185		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 186		return tic->t_unit_res;
 187	} else {
 188		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 189			return tic->t_unit_res * tic->t_cnt;
 190		else
 191			return tic->t_unit_res;
 192	}
 193}
 194
 195STATIC bool
 196xlog_grant_head_wake(
 197	struct xlog		*log,
 198	struct xlog_grant_head	*head,
 199	int			*free_bytes)
 200{
 201	struct xlog_ticket	*tic;
 202	int			need_bytes;
 203
 204	list_for_each_entry(tic, &head->waiters, t_queue) {
 205		need_bytes = xlog_ticket_reservation(log, head, tic);
 206		if (*free_bytes < need_bytes)
 207			return false;
 208
 209		*free_bytes -= need_bytes;
 210		trace_xfs_log_grant_wake_up(log, tic);
 211		wake_up_process(tic->t_task);
 212	}
 213
 214	return true;
 215}
 216
 217STATIC int
 218xlog_grant_head_wait(
 219	struct xlog		*log,
 220	struct xlog_grant_head	*head,
 221	struct xlog_ticket	*tic,
 222	int			need_bytes)
 223{
 224	list_add_tail(&tic->t_queue, &head->waiters);
 225
 226	do {
 227		if (XLOG_FORCED_SHUTDOWN(log))
 228			goto shutdown;
 229		xlog_grant_push_ail(log, need_bytes);
 230
 231		__set_current_state(TASK_UNINTERRUPTIBLE);
 232		spin_unlock(&head->lock);
 233
 234		XFS_STATS_INC(xs_sleep_logspace);
 235
 236		trace_xfs_log_grant_sleep(log, tic);
 237		schedule();
 238		trace_xfs_log_grant_wake(log, tic);
 239
 240		spin_lock(&head->lock);
 241		if (XLOG_FORCED_SHUTDOWN(log))
 242			goto shutdown;
 243	} while (xlog_space_left(log, &head->grant) < need_bytes);
 244
 245	list_del_init(&tic->t_queue);
 246	return 0;
 247shutdown:
 248	list_del_init(&tic->t_queue);
 249	return XFS_ERROR(EIO);
 250}
 251
 252/*
 253 * Atomically get the log space required for a log ticket.
 254 *
 255 * Once a ticket gets put onto head->waiters, it will only return after the
 256 * needed reservation is satisfied.
 257 *
 258 * This function is structured so that it has a lock free fast path. This is
 259 * necessary because every new transaction reservation will come through this
 260 * path. Hence any lock will be globally hot if we take it unconditionally on
 261 * every pass.
 262 *
 263 * As tickets are only ever moved on and off head->waiters under head->lock, we
 264 * only need to take that lock if we are going to add the ticket to the queue
 265 * and sleep. We can avoid taking the lock if the ticket was never added to
 266 * head->waiters because the t_queue list head will be empty and we hold the
 267 * only reference to it so it can safely be checked unlocked.
 268 */
 269STATIC int
 270xlog_grant_head_check(
 271	struct xlog		*log,
 272	struct xlog_grant_head	*head,
 273	struct xlog_ticket	*tic,
 274	int			*need_bytes)
 275{
 276	int			free_bytes;
 277	int			error = 0;
 278
 279	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 280
 281	/*
 282	 * If there are other waiters on the queue then give them a chance at
 283	 * logspace before us.  Wake up the first waiters, if we do not wake
 284	 * up all the waiters then go to sleep waiting for more free space,
 285	 * otherwise try to get some space for this transaction.
 286	 */
 287	*need_bytes = xlog_ticket_reservation(log, head, tic);
 288	free_bytes = xlog_space_left(log, &head->grant);
 289	if (!list_empty_careful(&head->waiters)) {
 290		spin_lock(&head->lock);
 291		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
 292		    free_bytes < *need_bytes) {
 293			error = xlog_grant_head_wait(log, head, tic,
 294						     *need_bytes);
 295		}
 296		spin_unlock(&head->lock);
 297	} else if (free_bytes < *need_bytes) {
 298		spin_lock(&head->lock);
 299		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
 300		spin_unlock(&head->lock);
 301	}
 302
 303	return error;
 304}
 305
 306static void
 307xlog_tic_reset_res(xlog_ticket_t *tic)
 308{
 309	tic->t_res_num = 0;
 310	tic->t_res_arr_sum = 0;
 311	tic->t_res_num_ophdrs = 0;
 312}
 313
 314static void
 315xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
 316{
 317	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
 318		/* add to overflow and start again */
 319		tic->t_res_o_flow += tic->t_res_arr_sum;
 320		tic->t_res_num = 0;
 321		tic->t_res_arr_sum = 0;
 322	}
 323
 324	tic->t_res_arr[tic->t_res_num].r_len = len;
 325	tic->t_res_arr[tic->t_res_num].r_type = type;
 326	tic->t_res_arr_sum += len;
 327	tic->t_res_num++;
 328}
 329
 330/*
 331 * Replenish the byte reservation required by moving the grant write head.
 332 */
 333int
 334xfs_log_regrant(
 335	struct xfs_mount	*mp,
 336	struct xlog_ticket	*tic)
 337{
 338	struct xlog		*log = mp->m_log;
 339	int			need_bytes;
 340	int			error = 0;
 341
 342	if (XLOG_FORCED_SHUTDOWN(log))
 343		return XFS_ERROR(EIO);
 344
 345	XFS_STATS_INC(xs_try_logspace);
 346
 347	/*
 348	 * This is a new transaction on the ticket, so we need to change the
 349	 * transaction ID so that the next transaction has a different TID in
 350	 * the log. Just add one to the existing tid so that we can see chains
 351	 * of rolling transactions in the log easily.
 352	 */
 353	tic->t_tid++;
 354
 355	xlog_grant_push_ail(log, tic->t_unit_res);
 356
 357	tic->t_curr_res = tic->t_unit_res;
 358	xlog_tic_reset_res(tic);
 359
 360	if (tic->t_cnt > 0)
 361		return 0;
 362
 363	trace_xfs_log_regrant(log, tic);
 364
 365	error = xlog_grant_head_check(log, &log->l_write_head, tic,
 366				      &need_bytes);
 367	if (error)
 368		goto out_error;
 369
 370	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 371	trace_xfs_log_regrant_exit(log, tic);
 372	xlog_verify_grant_tail(log);
 373	return 0;
 374
 375out_error:
 376	/*
 377	 * If we are failing, make sure the ticket doesn't have any current
 378	 * reservations.  We don't want to add this back when the ticket/
 379	 * transaction gets cancelled.
 380	 */
 381	tic->t_curr_res = 0;
 382	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 383	return error;
 384}
 385
 386/*
  388 * Reserve log space and return a ticket corresponding to the reservation.
 388 *
 389 * Each reservation is going to reserve extra space for a log record header.
 390 * When writes happen to the on-disk log, we don't subtract the length of the
 391 * log record header from any reservation.  By wasting space in each
 392 * reservation, we prevent over allocation problems.
 393 */
 394int
 395xfs_log_reserve(
 396	struct xfs_mount	*mp,
 397	int		 	unit_bytes,
 398	int		 	cnt,
 399	struct xlog_ticket	**ticp,
 400	__uint8_t	 	client,
 401	bool			permanent,
 402	uint		 	t_type)
 403{
 404	struct xlog		*log = mp->m_log;
 405	struct xlog_ticket	*tic;
 406	int			need_bytes;
 407	int			error = 0;
 408
 409	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
 410
 411	if (XLOG_FORCED_SHUTDOWN(log))
 412		return XFS_ERROR(EIO);
 413
 414	XFS_STATS_INC(xs_try_logspace);
 415
 416	ASSERT(*ticp == NULL);
 417	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
 418				KM_SLEEP | KM_MAYFAIL);
 419	if (!tic)
 420		return XFS_ERROR(ENOMEM);
 421
 422	tic->t_trans_type = t_type;
 423	*ticp = tic;
 424
 425	xlog_grant_push_ail(log, tic->t_unit_res * tic->t_cnt);
 426
 427	trace_xfs_log_reserve(log, tic);
 428
 429	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
 430				      &need_bytes);
 431	if (error)
 432		goto out_error;
 433
 434	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
 435	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
 436	trace_xfs_log_reserve_exit(log, tic);
 437	xlog_verify_grant_tail(log);
 438	return 0;
 439
 440out_error:
 441	/*
 442	 * If we are failing, make sure the ticket doesn't have any current
 443	 * reservations.  We don't want to add this back when the ticket/
 444	 * transaction gets cancelled.
 445	 */
 446	tic->t_curr_res = 0;
 447	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
 448	return error;
 449}
 450
 451
 452/*
 453 * NOTES:
 454 *
 455 *	1. currblock field gets updated at startup and after in-core logs
  456 *		are marked with WANT_SYNC.
 457 */
 458
 459/*
 460 * This routine is called when a user of a log manager ticket is done with
 461 * the reservation.  If the ticket was ever used, then a commit record for
 462 * the associated transaction is written out as a log operation header with
 463 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 464 * a given ticket.  If the ticket was one with a permanent reservation, then
 465 * a few operations are done differently.  Permanent reservation tickets by
 466 * default don't release the reservation.  They just commit the current
 467 * transaction with the belief that the reservation is still needed.  A flag
 468 * must be passed in before permanent reservations are actually released.
 469 * When these type of tickets are not released, they need to be set into
 470 * the inited state again.  By doing this, a start record will be written
 471 * out when the next write occurs.
 472 */
 473xfs_lsn_t
 474xfs_log_done(
 475	struct xfs_mount	*mp,
 476	struct xlog_ticket	*ticket,
 477	struct xlog_in_core	**iclog,
 478	uint			flags)
 479{
 480	struct xlog		*log = mp->m_log;
 481	xfs_lsn_t		lsn = 0;
 482
 483	if (XLOG_FORCED_SHUTDOWN(log) ||
 484	    /*
 485	     * If nothing was ever written, don't write out commit record.
 486	     * If we get an error, just continue and give back the log ticket.
 487	     */
 488	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
 489	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
 490		lsn = (xfs_lsn_t) -1;
 491		if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
 492			flags |= XFS_LOG_REL_PERM_RESERV;
 493		}
 494	}
 495
 496
 497	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
 498	    (flags & XFS_LOG_REL_PERM_RESERV)) {
 499		trace_xfs_log_done_nonperm(log, ticket);
 500
 501		/*
 502		 * Release ticket if not permanent reservation or a specific
 503		 * request has been made to release a permanent reservation.
 504		 */
 505		xlog_ungrant_log_space(log, ticket);
 506		xfs_log_ticket_put(ticket);
 507	} else {
 508		trace_xfs_log_done_perm(log, ticket);
 509
 510		xlog_regrant_reserve_log_space(log, ticket);
 511		/* If this ticket was a permanent reservation and we aren't
 512		 * trying to release it, reset the inited flags so that next time
 513		 * we write, a start record will be written out.
 514		 */
 515		ticket->t_flags |= XLOG_TIC_INITED;
 516	}
 517
 518	return lsn;
 519}
 520
 521/*
 522 * Attaches a new iclog I/O completion callback routine during
 523 * transaction commit.  If the log is in error state, a non-zero
 524 * return code is handed back and the caller is responsible for
 525 * executing the callback at an appropriate time.
 526 */
 527int
 528xfs_log_notify(
 529	struct xfs_mount	*mp,
 530	struct xlog_in_core	*iclog,
 531	xfs_log_callback_t	*cb)
 532{
 533	int	abortflg;
 534
 535	spin_lock(&iclog->ic_callback_lock);
 536	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
 537	if (!abortflg) {
 538		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
 539			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
 540		cb->cb_next = NULL;
 541		*(iclog->ic_callback_tail) = cb;
 542		iclog->ic_callback_tail = &(cb->cb_next);
 543	}
 544	spin_unlock(&iclog->ic_callback_lock);
 545	return abortflg;
 546}
 547
 548int
 549xfs_log_release_iclog(
 550	struct xfs_mount	*mp,
 551	struct xlog_in_core	*iclog)
 552{
 553	if (xlog_state_release_iclog(mp->m_log, iclog)) {
 554		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 555		return EIO;
 556	}
 557
 558	return 0;
 559}
 560
 561/*
 562 * Mount a log filesystem
 563 *
 564 * mp		- ubiquitous xfs mount point structure
 565 * log_target	- buftarg of on-disk log device
 566 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 567 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 568 *
 569 * Return error or zero.
 570 */
 571int
 572xfs_log_mount(
 573	xfs_mount_t	*mp,
 574	xfs_buftarg_t	*log_target,
 575	xfs_daddr_t	blk_offset,
 576	int		num_bblks)
 577{
 578	int		error;
 579
 580	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 581		xfs_notice(mp, "Mounting Filesystem");
 582	else {
 583		xfs_notice(mp,
 584"Mounting filesystem in no-recovery mode.  Filesystem will be inconsistent.");
 585		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 586	}
 587
 588	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
 589	if (IS_ERR(mp->m_log)) {
 590		error = -PTR_ERR(mp->m_log);
 591		goto out;
 592	}
 593
 594	/*
 595	 * Initialize the AIL now we have a log.
 596	 */
 597	error = xfs_trans_ail_init(mp);
 598	if (error) {
 599		xfs_warn(mp, "AIL initialisation failed: error %d", error);
 600		goto out_free_log;
 601	}
 602	mp->m_log->l_ailp = mp->m_ail;
 603
 604	/*
 605	 * skip log recovery on a norecovery mount.  pretend it all
 606	 * just worked.
 607	 */
 608	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 609		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 610
 611		if (readonly)
 612			mp->m_flags &= ~XFS_MOUNT_RDONLY;
 613
 614		error = xlog_recover(mp->m_log);
 615
 616		if (readonly)
 617			mp->m_flags |= XFS_MOUNT_RDONLY;
 618		if (error) {
 619			xfs_warn(mp, "log mount/recovery failed: error %d",
 620				error);
 621			goto out_destroy_ail;
 622		}
 623	}
 624
 625	/* Normal transactions can now occur */
 626	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
 627
 628	/*
 629	 * Now the log has been fully initialised and we know where our
 630	 * space grant counters are, we can initialise the permanent ticket
 631	 * needed for delayed logging to work.
 632	 */
 633	xlog_cil_init_post_recovery(mp->m_log);
 634
 635	return 0;
 636
 637out_destroy_ail:
 638	xfs_trans_ail_destroy(mp);
 639out_free_log:
 640	xlog_dealloc_log(mp->m_log);
 641out:
 642	return error;
 643}
 644
 645/*
 646 * Finish the recovery of the file system.  This is separate from
 647 * the xfs_log_mount() call, because it depends on the code in
 648 * xfs_mountfs() to read in the root and real-time bitmap inodes
 649 * between calling xfs_log_mount() and here.
 650 *
 651 * mp		- ubiquitous xfs mount point structure
 652 */
 653int
 654xfs_log_mount_finish(xfs_mount_t *mp)
 655{
 656	int	error;
 657
 658	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 659		error = xlog_recover_finish(mp->m_log);
 660	else {
 661		error = 0;
 662		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 663	}
 664
 665	return error;
 666}
 667
 668/*
 669 * Final log writes as part of unmount.
 670 *
 671 * Mark the filesystem clean as unmount happens.  Note that during relocation
 672 * this routine needs to be executed as part of source-bag while the
 673 * deallocation must not be done until source-end.
 674 */
 675
 676/*
 677 * Unmount record used to have a string "Unmount filesystem--" in the
 678 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 679 * We just write the magic number now since that particular field isn't
 680 * currently architecture converted and "nUmount" is a bit foo.
 681 * As far as I know, there weren't any dependencies on the old behaviour.
 682 */
 683
 684int
 685xfs_log_unmount_write(xfs_mount_t *mp)
 686{
 687	xlog_t		 *log = mp->m_log;
 688	xlog_in_core_t	 *iclog;
 689#ifdef DEBUG
 690	xlog_in_core_t	 *first_iclog;
 691#endif
 692	xlog_ticket_t	*tic = NULL;
 693	xfs_lsn_t	 lsn;
 694	int		 error;
 695
 696	/*
 697	 * Don't write out the unmount record on read-only mounts, or if we
 698	 * are doing a forced umount (typically because of IO errors).
 699	 */
 700	if (mp->m_flags & XFS_MOUNT_RDONLY)
 701		return 0;
 702
 703	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
 704	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 705
 706#ifdef DEBUG
 707	first_iclog = iclog = log->l_iclog;
 708	do {
 709		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
 710			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
 711			ASSERT(iclog->ic_offset == 0);
 712		}
 713		iclog = iclog->ic_next;
 714	} while (iclog != first_iclog);
 715#endif
 716	if (! (XLOG_FORCED_SHUTDOWN(log))) {
 717		error = xfs_log_reserve(mp, 600, 1, &tic,
 718					XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
 719		if (!error) {
 720			/* the data section must be 32 bit size aligned */
 721			struct {
 722			    __uint16_t magic;
 723			    __uint16_t pad1;
 724			    __uint32_t pad2; /* may as well make it 64 bits */
 725			} magic = {
 726				.magic = XLOG_UNMOUNT_TYPE,
 727			};
 728			struct xfs_log_iovec reg = {
 729				.i_addr = &magic,
 730				.i_len = sizeof(magic),
 731				.i_type = XLOG_REG_TYPE_UNMOUNT,
 732			};
 733			struct xfs_log_vec vec = {
 734				.lv_niovecs = 1,
 735				.lv_iovecp = &reg,
 736			};
 737
 738			/* remove inited flag, and account for space used */
 739			tic->t_flags = 0;
 740			tic->t_curr_res -= sizeof(magic);
 741			error = xlog_write(log, &vec, tic, &lsn,
 742					   NULL, XLOG_UNMOUNT_TRANS);
 743			/*
 744			 * At this point, we're umounting anyway,
 745			 * so there's no point in transitioning log state
 746			 * to IOERROR. Just continue...
 747			 */
 748		}
 749
 750		if (error)
 751			xfs_alert(mp, "%s: unmount record failed", __func__);
 752
 753
 754		spin_lock(&log->l_icloglock);
 755		iclog = log->l_iclog;
 756		atomic_inc(&iclog->ic_refcnt);
 757		xlog_state_want_sync(log, iclog);
 758		spin_unlock(&log->l_icloglock);
 759		error = xlog_state_release_iclog(log, iclog);
 760
 761		spin_lock(&log->l_icloglock);
 762		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 763		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 764			if (!XLOG_FORCED_SHUTDOWN(log)) {
 765				xlog_wait(&iclog->ic_force_wait,
 766							&log->l_icloglock);
 767			} else {
 768				spin_unlock(&log->l_icloglock);
 769			}
 770		} else {
 771			spin_unlock(&log->l_icloglock);
 772		}
 773		if (tic) {
 774			trace_xfs_log_umount_write(log, tic);
 775			xlog_ungrant_log_space(log, tic);
 776			xfs_log_ticket_put(tic);
 777		}
 778	} else {
 779		/*
 780		 * We're already in forced_shutdown mode, couldn't
 781		 * even attempt to write out the unmount transaction.
 782		 *
 783		 * Go through the motions of sync'ing and releasing
 784		 * the iclog, even though no I/O will actually happen,
 785		 * we need to wait for other log I/Os that may already
 786		 * be in progress.  Do this as a separate section of
 787		 * code so we'll know if we ever get stuck here that
 788		 * we're in this odd situation of trying to unmount
 789		 * a file system that went into forced_shutdown as
 790	 * the result of an unmount.
 791		 */
 792		spin_lock(&log->l_icloglock);
 793		iclog = log->l_iclog;
 794		atomic_inc(&iclog->ic_refcnt);
 795
 796		xlog_state_want_sync(log, iclog);
 797		spin_unlock(&log->l_icloglock);
 798		error =  xlog_state_release_iclog(log, iclog);
 799
 800		spin_lock(&log->l_icloglock);
 801
 802		if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
 803			|| iclog->ic_state == XLOG_STATE_DIRTY
 804			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 805
 806				xlog_wait(&iclog->ic_force_wait,
 807							&log->l_icloglock);
 808		} else {
 809			spin_unlock(&log->l_icloglock);
 810		}
 811	}
 812
 813	return error;
 814}	/* xfs_log_unmount_write */
 815
 816/*
 817 * Deallocate log structures for unmount/relocation.
 818 *
 819 * We need to stop the aild from running before we destroy
 820 * and deallocate the log as the aild references the log.
 821 */
 822void
 823xfs_log_unmount(xfs_mount_t *mp)
 824{
 825	cancel_delayed_work_sync(&mp->m_sync_work);
 826	xfs_trans_ail_destroy(mp);
 827	xlog_dealloc_log(mp->m_log);
 828}
 829
 830void
 831xfs_log_item_init(
 832	struct xfs_mount	*mp,
 833	struct xfs_log_item	*item,
 834	int			type,
 835	const struct xfs_item_ops *ops)
 836{
 837	item->li_mountp = mp;
 838	item->li_ailp = mp->m_ail;
 839	item->li_type = type;
 840	item->li_ops = ops;
 841	item->li_lv = NULL;
 842
 843	INIT_LIST_HEAD(&item->li_ail);
 844	INIT_LIST_HEAD(&item->li_cil);
 845}
 846
 847/*
 848 * Wake up processes waiting for log space after we have moved the log tail.
 849 */
 850void
 851xfs_log_space_wake(
 852	struct xfs_mount	*mp)
 853{
 854	struct xlog		*log = mp->m_log;
 855	int			free_bytes;
 856
 857	if (XLOG_FORCED_SHUTDOWN(log))
 858		return;
 859
 860	if (!list_empty_careful(&log->l_write_head.waiters)) {
 861		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 862
 863		spin_lock(&log->l_write_head.lock);
 864		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
 865		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
 866		spin_unlock(&log->l_write_head.lock);
 867	}
 868
 869	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
 870		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 871
 872		spin_lock(&log->l_reserve_head.lock);
 873		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
 874		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
 875		spin_unlock(&log->l_reserve_head.lock);
 876	}
 877}
 878
 879/*
 880 * Determine if we have a transaction that has gone to disk
 881 * that needs to be covered. To begin the transition to the idle state
 882 * the log first needs to be idle (no AIL and nothing in the iclogs).
 883 * If we are then in a state where covering is needed, the caller is informed
 884 * that dummy transactions are required to move the log into the idle state.
 885 *
 886 * Because this is called as part of the sync process, we should also indicate
 887 * that dummy transactions should be issued in anything but the covered or
 888 * idle states. This ensures that the log tail is accurately reflected in
 889 * the log at the end of the sync, hence if a crash occurs we avoid
 890 * replaying transactions whose metadata is already on disk.
 891 */
 892int
 893xfs_log_need_covered(xfs_mount_t *mp)
 894{
 895	int		needed = 0;
 896	xlog_t		*log = mp->m_log;
 897
 898	if (!xfs_fs_writable(mp))
 899		return 0;
 900
 901	spin_lock(&log->l_icloglock);
 902	switch (log->l_covered_state) {
 903	case XLOG_STATE_COVER_DONE:
 904	case XLOG_STATE_COVER_DONE2:
 905	case XLOG_STATE_COVER_IDLE:
 906		break;
 907	case XLOG_STATE_COVER_NEED:
 908	case XLOG_STATE_COVER_NEED2:
 909		if (!xfs_ail_min_lsn(log->l_ailp) &&
 910		    xlog_iclogs_empty(log)) {
 911			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
 912				log->l_covered_state = XLOG_STATE_COVER_DONE;
 913			else
 914				log->l_covered_state = XLOG_STATE_COVER_DONE2;
 915		}
 916		/* FALLTHRU */
 917	default:
 918		needed = 1;
 919		break;
 920	}
 921	spin_unlock(&log->l_icloglock);
 922	return needed;
 923}
 924
 925/*
 926 * We may be holding the log iclog lock upon entering this routine.
 927 */
 928xfs_lsn_t
 929xlog_assign_tail_lsn_locked(
 930	struct xfs_mount	*mp)
 931{
 932	struct xlog		*log = mp->m_log;
 933	struct xfs_log_item	*lip;
 934	xfs_lsn_t		tail_lsn;
 935
 936	assert_spin_locked(&mp->m_ail->xa_lock);
 937
 938	/*
 939	 * To make sure we always have a valid LSN for the log tail we keep
 940	 * track of the last LSN which was committed in log->l_last_sync_lsn,
 941	 * and use that when the AIL was empty.
 942	 */
 943	lip = xfs_ail_min(mp->m_ail);
 944	if (lip)
 945		tail_lsn = lip->li_lsn;
 946	else
 947		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 948	atomic64_set(&log->l_tail_lsn, tail_lsn);
 949	return tail_lsn;
 950}
 951
 952xfs_lsn_t
 953xlog_assign_tail_lsn(
 954	struct xfs_mount	*mp)
 955{
 956	xfs_lsn_t		tail_lsn;
 957
 958	spin_lock(&mp->m_ail->xa_lock);
 959	tail_lsn = xlog_assign_tail_lsn_locked(mp);
 960	spin_unlock(&mp->m_ail->xa_lock);
 961
 962	return tail_lsn;
 963}
 964
 965/*
 966 * Return the space in the log between the tail and the head.  The head
 967 * is passed in the cycle/bytes formal parms.  In the special case where
  968 * the reserve head has wrapped past the tail, this calculation is no
 969 * longer valid.  In this case, just return 0 which means there is no space
 970 * in the log.  This works for all places where this function is called
 971 * with the reserve head.  Of course, if the write head were to ever
 972 * wrap the tail, we should blow up.  Rather than catch this case here,
 973 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 974 *
 975 * This code also handles the case where the reservation head is behind
 976 * the tail.  The details of this case are described below, but the end
 977 * result is that we return the size of the log as the amount of space left.
 978 */
 979STATIC int
 980xlog_space_left(
 981	struct xlog	*log,
 982	atomic64_t	*head)
 983{
 984	int		free_bytes;
 985	int		tail_bytes;
 986	int		tail_cycle;
 987	int		head_cycle;
 988	int		head_bytes;
 989
 990	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
 991	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
 992	tail_bytes = BBTOB(tail_bytes);
 993	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
 994		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
 995	else if (tail_cycle + 1 < head_cycle)
 996		return 0;
 997	else if (tail_cycle < head_cycle) {
 998		ASSERT(tail_cycle == (head_cycle - 1));
 999		free_bytes = tail_bytes - head_bytes;
1000	} else {
1001		/*
1002		 * The reservation head is behind the tail.
1003		 * In this case we just want to return the size of the
1004		 * log as the amount of space left.
1005		 */
1006		xfs_alert(log->l_mp,
1007			"xlog_space_left: head behind tail\n"
1008			"  tail_cycle = %d, tail_bytes = %d\n"
1009			"  GH   cycle = %d, GH   bytes = %d",
1010			tail_cycle, tail_bytes, head_cycle, head_bytes);
1011		ASSERT(0);
1012		free_bytes = log->l_logsize;
1013	}
1014	return free_bytes;
1015}
1016
1017
1018/*
1019 * Log function which is called when an io completes.
1020 *
1021 * The log manager needs its own routine, in order to control what
1022 * happens with the buffer after the write completes.
1023 */
1024void
1025xlog_iodone(xfs_buf_t *bp)
1026{
1027	xlog_in_core_t	*iclog = bp->b_fspriv;
1028	xlog_t		*l = iclog->ic_log;
1029	int		aborted = 0;
1030
1031	/*
1032	 * Race to shutdown the filesystem if we see an error.
1033	 */
1034	if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
1035			XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
1036		xfs_buf_ioerror_alert(bp, __func__);
1037		xfs_buf_stale(bp);
1038		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
1039		/*
1040		 * This flag will be propagated to the trans-committed
1041		 * callback routines to let them know that the log-commit
1042		 * didn't succeed.
1043		 */
1044		aborted = XFS_LI_ABORTED;
1045	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
1046		aborted = XFS_LI_ABORTED;
1047	}
1048
1049	/* log I/O is always issued ASYNC */
1050	ASSERT(XFS_BUF_ISASYNC(bp));
1051	xlog_state_done_syncing(iclog, aborted);
1052	/*
1053	 * do not reference the buffer (bp) here as we could race
1054	 * with it being freed after writing the unmount record to the
1055	 * log.
1056	 */
1057
1058}	/* xlog_iodone */
1059
1060/*
1061 * Return size of each in-core log record buffer.
1062 *
1063 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1064 *
1065 * If the filesystem blocksize is too large, we may need to choose a
1066 * larger size since the directory code currently logs entire blocks.
1067 */
1068
1069STATIC void
1070xlog_get_iclog_buffer_size(xfs_mount_t	*mp,
1071			   xlog_t	*log)
1072{
1073	int size;
1074	int xhdrs;
1075
1076	if (mp->m_logbufs <= 0)
1077		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
1078	else
1079		log->l_iclog_bufs = mp->m_logbufs;
1080
1081	/*
1082	 * Buffer size passed in from mount system call.
1083	 */
1084	if (mp->m_logbsize > 0) {
1085		size = log->l_iclog_size = mp->m_logbsize;
1086		log->l_iclog_size_log = 0;
1087		while (size != 1) {
1088			log->l_iclog_size_log++;
1089			size >>= 1;
1090		}
1091
1092		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1093			/* # headers = size / 32k
1094			 * one header holds cycles from 32k of data
1095			 */
1096
1097			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
1098			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
1099				xhdrs++;
1100			log->l_iclog_hsize = xhdrs << BBSHIFT;
1101			log->l_iclog_heads = xhdrs;
1102		} else {
1103			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
1104			log->l_iclog_hsize = BBSIZE;
1105			log->l_iclog_heads = 1;
1106		}
1107		goto done;
1108	}
1109
1110	/* All machines use 32kB buffers by default. */
1111	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
1112	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1113
1114	/* the default log size is 16k or 32k which is one header sector */
1115	log->l_iclog_hsize = BBSIZE;
1116	log->l_iclog_heads = 1;
1117
1118done:
1119	/* are we being asked to make the sizes selected above visible? */
1120	if (mp->m_logbufs == 0)
1121		mp->m_logbufs = log->l_iclog_bufs;
1122	if (mp->m_logbsize == 0)
1123		mp->m_logbsize = log->l_iclog_size;
1124}	/* xlog_get_iclog_buffer_size */
1125
1126
1127/*
1128 * This routine initializes some of the log structure for a given mount point.
1129 * Its primary purpose is to fill in enough, so recovery can occur.  However,
1130 * some other stuff may be filled in too.
1131 */
1132STATIC xlog_t *
1133xlog_alloc_log(xfs_mount_t	*mp,
1134	       xfs_buftarg_t	*log_target,
1135	       xfs_daddr_t	blk_offset,
1136	       int		num_bblks)
1137{
1138	xlog_t			*log;
1139	xlog_rec_header_t	*head;
1140	xlog_in_core_t		**iclogp;
1141	xlog_in_core_t		*iclog, *prev_iclog=NULL;
1142	xfs_buf_t		*bp;
1143	int			i;
1144	int			error = ENOMEM;
1145	uint			log2_size = 0;
1146
1147	log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
1148	if (!log) {
1149		xfs_warn(mp, "Log allocation failed: No memory!");
1150		goto out;
1151	}
1152
1153	log->l_mp	   = mp;
1154	log->l_targ	   = log_target;
1155	log->l_logsize     = BBTOB(num_bblks);
1156	log->l_logBBstart  = blk_offset;
1157	log->l_logBBsize   = num_bblks;
1158	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1159	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
1160
1161	log->l_prev_block  = -1;
1162	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1163	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1164	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1165	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1166
1167	xlog_grant_head_init(&log->l_reserve_head);
1168	xlog_grant_head_init(&log->l_write_head);
1169
1170	error = EFSCORRUPTED;
1171	if (xfs_sb_version_hassector(&mp->m_sb)) {
1172	        log2_size = mp->m_sb.sb_logsectlog;
1173		if (log2_size < BBSHIFT) {
1174			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1175				log2_size, BBSHIFT);
1176			goto out_free_log;
1177		}
1178
1179	        log2_size -= BBSHIFT;
1180		if (log2_size > mp->m_sectbb_log) {
1181			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1182				log2_size, mp->m_sectbb_log);
1183			goto out_free_log;
1184		}
1185
1186		/* for larger sector sizes, must have v2 or external log */
1187		if (log2_size && log->l_logBBstart > 0 &&
1188			    !xfs_sb_version_haslogv2(&mp->m_sb)) {
1189			xfs_warn(mp,
1190		"log sector size (0x%x) invalid for configuration.",
1191				log2_size);
1192			goto out_free_log;
1193		}
1194	}
1195	log->l_sectBBsize = 1 << log2_size;
1196
1197	xlog_get_iclog_buffer_size(mp, log);
1198
1199	error = ENOMEM;
1200	bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
1201	if (!bp)
1202		goto out_free_log;
1203	bp->b_iodone = xlog_iodone;
1204	ASSERT(xfs_buf_islocked(bp));
1205	log->l_xbuf = bp;
1206
1207	spin_lock_init(&log->l_icloglock);
1208	init_waitqueue_head(&log->l_flush_wait);
1209
1210	iclogp = &log->l_iclog;
1211	/*
1212	 * The amount of memory to allocate for the iclog structure is
1213	 * rather funky due to the way the structure is defined.  It is
1214	 * done this way so that we can use different sizes for machines
1215	 * with different amounts of memory.  See the definition of
1216	 * xlog_in_core_t in xfs_log_priv.h for details.
1217	 */
1218	ASSERT(log->l_iclog_size >= 4096);
1219	for (i=0; i < log->l_iclog_bufs; i++) {
1220		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
1221		if (!*iclogp)
1222			goto out_free_iclog;
1223
1224		iclog = *iclogp;
1225		iclog->ic_prev = prev_iclog;
1226		prev_iclog = iclog;
1227
1228		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
1229						BTOBB(log->l_iclog_size), 0);
1230		if (!bp)
1231			goto out_free_iclog;
1232
1233		bp->b_iodone = xlog_iodone;
1234		iclog->ic_bp = bp;
1235		iclog->ic_data = bp->b_addr;
1236#ifdef DEBUG
1237		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1238#endif
1239		head = &iclog->ic_header;
1240		memset(head, 0, sizeof(xlog_rec_header_t));
1241		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1242		head->h_version = cpu_to_be32(
1243			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1244		head->h_size = cpu_to_be32(log->l_iclog_size);
1245		/* new fields */
1246		head->h_fmt = cpu_to_be32(XLOG_FMT);
1247		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1248
1249		iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
1250		iclog->ic_state = XLOG_STATE_ACTIVE;
1251		iclog->ic_log = log;
1252		atomic_set(&iclog->ic_refcnt, 0);
1253		spin_lock_init(&iclog->ic_callback_lock);
1254		iclog->ic_callback_tail = &(iclog->ic_callback);
1255		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1256
1257		ASSERT(xfs_buf_islocked(iclog->ic_bp));
1258		init_waitqueue_head(&iclog->ic_force_wait);
1259		init_waitqueue_head(&iclog->ic_write_wait);
1260
1261		iclogp = &iclog->ic_next;
1262	}
1263	*iclogp = log->l_iclog;			/* complete ring */
1264	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1265
1266	error = xlog_cil_init(log);
1267	if (error)
1268		goto out_free_iclog;
1269	return log;
1270
1271out_free_iclog:
1272	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1273		prev_iclog = iclog->ic_next;
1274		if (iclog->ic_bp)
1275			xfs_buf_free(iclog->ic_bp);
1276		kmem_free(iclog);
1277	}
1278	spinlock_destroy(&log->l_icloglock);
1279	xfs_buf_free(log->l_xbuf);
1280out_free_log:
1281	kmem_free(log);
1282out:
1283	return ERR_PTR(-error);
1284}	/* xlog_alloc_log */
1285
1286
1287/*
1288 * Write out the commit record of a transaction associated with the given
1289 * ticket.  Return the lsn of the commit record.
1290 */
1291STATIC int
1292xlog_commit_record(
1293	struct xlog		*log,
1294	struct xlog_ticket	*ticket,
1295	struct xlog_in_core	**iclog,
1296	xfs_lsn_t		*commitlsnp)
1297{
1298	struct xfs_mount *mp = log->l_mp;
1299	int	error;
1300	struct xfs_log_iovec reg = {
1301		.i_addr = NULL,
1302		.i_len = 0,
1303		.i_type = XLOG_REG_TYPE_COMMIT,
1304	};
1305	struct xfs_log_vec vec = {
1306		.lv_niovecs = 1,
1307		.lv_iovecp = &reg,
1308	};
1309
1310	ASSERT_ALWAYS(iclog);
1311	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1312					XLOG_COMMIT_TRANS);
1313	if (error)
1314		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1315	return error;
1316}
1317
1318/*
1319 * Push on the buffer cache code if we ever use more than 75% of the on-disk
1320 * log space.  This code pushes on the lsn which would supposedly free up
1321 * the 25% which we want to leave free.  We may need to adopt a policy which
1322 * pushes on an lsn which is further along in the log once we reach the high
1323 * water mark.  In this manner, we would be creating a low water mark.
1324 */
1325STATIC void
1326xlog_grant_push_ail(
1327	struct xlog	*log,
1328	int		need_bytes)
1329{
1330	xfs_lsn_t	threshold_lsn = 0;
1331	xfs_lsn_t	last_sync_lsn;
1332	int		free_blocks;
1333	int		free_bytes;
1334	int		threshold_block;
1335	int		threshold_cycle;
1336	int		free_threshold;
1337
1338	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1339
1340	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1341	free_blocks = BTOBBT(free_bytes);
1342
1343	/*
1344	 * Set the threshold for the minimum number of free blocks in the
1345	 * log to the maximum of what the caller needs, one quarter of the
1346	 * log, and 256 blocks.
1347	 */
1348	free_threshold = BTOBB(need_bytes);
1349	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1350	free_threshold = MAX(free_threshold, 256);
1351	if (free_blocks >= free_threshold)
1352		return;
1353
1354	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1355						&threshold_block);
1356	threshold_block += free_threshold;
1357	if (threshold_block >= log->l_logBBsize) {
1358		threshold_block -= log->l_logBBsize;
1359		threshold_cycle += 1;
1360	}
1361	threshold_lsn = xlog_assign_lsn(threshold_cycle,
1362					threshold_block);
1363	/*
1364	 * Don't pass in an lsn greater than the lsn of the last
1365	 * log record known to be on disk. Use a snapshot of the last sync lsn
1366	 * so that it doesn't change between the compare and the set.
1367	 */
1368	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1369	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1370		threshold_lsn = last_sync_lsn;
1371
1372	/*
1373	 * Get the transaction layer to kick the dirty buffers out to
1374	 * disk asynchronously. No point in trying to do this if
1375	 * the filesystem is shutting down.
1376	 */
1377	if (!XLOG_FORCED_SHUTDOWN(log))
1378		xfs_ail_push(log->l_ailp, threshold_lsn);
1379}
1380
1381/*
1382 * The bdstrat callback function for log bufs. This gives us a central
1383 * place to trap bufs in case we get hit by a log I/O error and need to
1384 * shutdown. Actually, in practice, even when we didn't get a log error,
1385 * we transition the iclogs to IOERROR state *after* flushing all existing
 1386 * iclogs to disk. This is because we don't want any more new transactions to be
1387 * started or completed afterwards.
1388 */
1389STATIC int
1390xlog_bdstrat(
1391	struct xfs_buf		*bp)
1392{
1393	struct xlog_in_core	*iclog = bp->b_fspriv;
1394
1395	if (iclog->ic_state & XLOG_STATE_IOERROR) {
1396		xfs_buf_ioerror(bp, EIO);
1397		xfs_buf_stale(bp);
1398		xfs_buf_ioend(bp, 0);
1399		/*
1400		 * It would seem logical to return EIO here, but we rely on
1401		 * the log state machine to propagate I/O errors instead of
1402		 * doing it here.
1403		 */
1404		return 0;
1405	}
1406
1407	xfs_buf_iorequest(bp);
1408	return 0;
1409}
1410
1411/*
1412 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
1413 * fashion.  Previously, we should have moved the current iclog
1414 * ptr in the log to point to the next available iclog.  This allows further
1415 * write to continue while this code syncs out an iclog ready to go.
1416 * Before an in-core log can be written out, the data section must be scanned
1417 * to save away the 1st word of each BBSIZE block into the header.  We replace
1418 * it with the current cycle count.  Each BBSIZE block is tagged with the
 1419 * cycle count because there is an implicit assumption that drives will
1420 * guarantee that entire 512 byte blocks get written at once.  In other words,
1421 * we can't have part of a 512 byte block written and part not written.  By
1422 * tagging each block, we will know which blocks are valid when recovering
1423 * after an unclean shutdown.
1424 *
1425 * This routine is single threaded on the iclog.  No other thread can be in
1426 * this routine with the same iclog.  Changing contents of iclog can there-
1427 * fore be done without grabbing the state machine lock.  Updating the global
1428 * log will require grabbing the lock though.
1429 *
1430 * The entire log manager uses a logical block numbering scheme.  Only
1431 * log_sync (and then only bwrite()) know about the fact that the log may
1432 * not start with block zero on a given device.  The log block start offset
1433 * is added immediately before calling bwrite().
1434 */
1435
1436STATIC int
1437xlog_sync(xlog_t		*log,
1438	  xlog_in_core_t	*iclog)
1439{
1440	xfs_caddr_t	dptr;		/* pointer to byte sized element */
1441	xfs_buf_t	*bp;
1442	int		i;
1443	uint		count;		/* byte count of bwrite */
1444	uint		count_init;	/* initial count before roundup */
1445	int		roundoff;       /* roundoff to BB or stripe */
1446	int		split = 0;	/* split write into two regions */
1447	int		error;
1448	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1449
1450	XFS_STATS_INC(xs_log_writes);
1451	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1452
1453	/* Add for LR header */
1454	count_init = log->l_iclog_hsize + iclog->ic_offset;
1455
1456	/* Round out the log write size */
1457	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1458		/* we have a v2 stripe unit to use */
1459		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1460	} else {
1461		count = BBTOB(BTOBB(count_init));
1462	}
1463	roundoff = count - count_init;
1464	ASSERT(roundoff >= 0);
1465	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
1466                roundoff < log->l_mp->m_sb.sb_logsunit)
1467		|| 
1468		(log->l_mp->m_sb.sb_logsunit <= 1 && 
1469		 roundoff < BBTOB(1)));
1470
1471	/* move grant heads by roundoff in sync */
1472	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1473	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1474
1475	/* put cycle number in every block */
1476	xlog_pack_data(log, iclog, roundoff); 
1477
1478	/* real byte length */
1479	if (v2) {
1480		iclog->ic_header.h_len =
1481			cpu_to_be32(iclog->ic_offset + roundoff);
1482	} else {
1483		iclog->ic_header.h_len =
1484			cpu_to_be32(iclog->ic_offset);
1485	}
1486
1487	bp = iclog->ic_bp;
1488	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
1489
1490	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1491
1492	/* Do we need to split this write into 2 parts? */
1493	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1494		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1495		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1496		iclog->ic_bwritecnt = 2;	/* split into 2 writes */
1497	} else {
1498		iclog->ic_bwritecnt = 1;
1499	}
1500	bp->b_io_length = BTOBB(count);
1501	bp->b_fspriv = iclog;
1502	XFS_BUF_ZEROFLAGS(bp);
1503	XFS_BUF_ASYNC(bp);
1504	bp->b_flags |= XBF_SYNCIO;
1505
1506	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1507		bp->b_flags |= XBF_FUA;
1508
1509		/*
1510		 * Flush the data device before flushing the log to make
1511		 * sure all meta data written back from the AIL actually made
1512		 * it to disk before stamping the new log tail LSN into the
1513		 * log buffer.  For an external log we need to issue the
1514		 * flush explicitly, and unfortunately synchronously here;
1515		 * for an internal log we can simply use the block layer
1516		 * state machine for preflushes.
1517		 */
1518		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1519			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1520		else
1521			bp->b_flags |= XBF_FLUSH;
1522	}
1523
1524	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1525	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1526
1527	xlog_verify_iclog(log, iclog, count, B_TRUE);
1528
1529	/* account for log which doesn't start at block #0 */
1530	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1531	/*
1532	 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1533	 * is shutting down.
1534	 */
1535	XFS_BUF_WRITE(bp);
1536
1537	error = xlog_bdstrat(bp);
1538	if (error) {
1539		xfs_buf_ioerror_alert(bp, "xlog_sync");
1540		return error;
1541	}
1542	if (split) {
1543		bp = iclog->ic_log->l_xbuf;
1544		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
1545		xfs_buf_associate_memory(bp,
1546				(char *)&iclog->ic_header + count, split);
1547		bp->b_fspriv = iclog;
1548		XFS_BUF_ZEROFLAGS(bp);
1549		XFS_BUF_ASYNC(bp);
1550		bp->b_flags |= XBF_SYNCIO;
1551		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1552			bp->b_flags |= XBF_FUA;
1553		dptr = bp->b_addr;
1554		/*
1555		 * Bump the cycle numbers at the start of each block
1556		 * since this part of the buffer is at the start of
1557		 * a new cycle.  Watch out for the header magic number
1558		 * case, though.
1559		 */
1560		for (i = 0; i < split; i += BBSIZE) {
1561			be32_add_cpu((__be32 *)dptr, 1);
1562			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
1563				be32_add_cpu((__be32 *)dptr, 1);
1564			dptr += BBSIZE;
1565		}
1566
1567		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1568		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1569
1570		/* account for internal log which doesn't start at block #0 */
1571		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1572		XFS_BUF_WRITE(bp);
1573		error = xlog_bdstrat(bp);
1574		if (error) {
1575			xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
1576			return error;
1577		}
1578	}
1579	return 0;
1580}	/* xlog_sync */
1581
1582
1583/*
1584 * Deallocate a log structure
1585 */
1586STATIC void
1587xlog_dealloc_log(xlog_t *log)
1588{
1589	xlog_in_core_t	*iclog, *next_iclog;
1590	int		i;
1591
1592	xlog_cil_destroy(log);
1593
1594	/*
1595	 * always need to ensure that the extra buffer does not point to memory
1596	 * owned by another log buffer before we free it.
1597	 */
1598	xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
1599	xfs_buf_free(log->l_xbuf);
1600
1601	iclog = log->l_iclog;
1602	for (i=0; i<log->l_iclog_bufs; i++) {
1603		xfs_buf_free(iclog->ic_bp);
1604		next_iclog = iclog->ic_next;
1605		kmem_free(iclog);
1606		iclog = next_iclog;
1607	}
1608	spinlock_destroy(&log->l_icloglock);
1609
1610	log->l_mp->m_log = NULL;
1611	kmem_free(log);
1612}	/* xlog_dealloc_log */
1613
1614/*
1615 * Update counters atomically now that memcpy is done.
1616 */
1617/* ARGSUSED */
1618static inline void
1619xlog_state_finish_copy(xlog_t		*log,
1620		       xlog_in_core_t	*iclog,
1621		       int		record_cnt,
1622		       int		copy_bytes)
1623{
1624	spin_lock(&log->l_icloglock);
1625
1626	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1627	iclog->ic_offset += copy_bytes;
1628
1629	spin_unlock(&log->l_icloglock);
1630}	/* xlog_state_finish_copy */
1631
1632
1633
1634
1635/*
1636 * print out info relating to regions written which consume
1637 * the reservation
1638 */
1639void
1640xlog_print_tic_res(
1641	struct xfs_mount	*mp,
1642	struct xlog_ticket	*ticket)
1643{
1644	uint i;
1645	uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1646
1647	/* match with XLOG_REG_TYPE_* in xfs_log.h */
1648	static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1649	    "bformat",
1650	    "bchunk",
1651	    "efi_format",
1652	    "efd_format",
1653	    "iformat",
1654	    "icore",
1655	    "iext",
1656	    "ibroot",
1657	    "ilocal",
1658	    "iattr_ext",
1659	    "iattr_broot",
1660	    "iattr_local",
1661	    "qformat",
1662	    "dquot",
1663	    "quotaoff",
1664	    "LR header",
1665	    "unmount",
1666	    "commit",
1667	    "trans header"
1668	};
1669	static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1670	    "SETATTR_NOT_SIZE",
1671	    "SETATTR_SIZE",
1672	    "INACTIVE",
1673	    "CREATE",
1674	    "CREATE_TRUNC",
1675	    "TRUNCATE_FILE",
1676	    "REMOVE",
1677	    "LINK",
1678	    "RENAME",
1679	    "MKDIR",
1680	    "RMDIR",
1681	    "SYMLINK",
1682	    "SET_DMATTRS",
1683	    "GROWFS",
1684	    "STRAT_WRITE",
1685	    "DIOSTRAT",
1686	    "WRITE_SYNC",
1687	    "WRITEID",
1688	    "ADDAFORK",
1689	    "ATTRINVAL",
1690	    "ATRUNCATE",
1691	    "ATTR_SET",
1692	    "ATTR_RM",
1693	    "ATTR_FLAG",
1694	    "CLEAR_AGI_BUCKET",
1695	    "QM_SBCHANGE",
1696	    "DUMMY1",
1697	    "DUMMY2",
1698	    "QM_QUOTAOFF",
1699	    "QM_DQALLOC",
1700	    "QM_SETQLIM",
1701	    "QM_DQCLUSTER",
1702	    "QM_QINOCREATE",
1703	    "QM_QUOTAOFF_END",
1704	    "SB_UNIT",
1705	    "FSYNC_TS",
1706	    "GROWFSRT_ALLOC",
1707	    "GROWFSRT_ZERO",
1708	    "GROWFSRT_FREE",
1709	    "SWAPEXT"
1710	};
1711
1712	xfs_warn(mp,
1713		"xlog_write: reservation summary:\n"
1714		"  trans type  = %s (%u)\n"
1715		"  unit res    = %d bytes\n"
1716		"  current res = %d bytes\n"
1717		"  total reg   = %u bytes (o/flow = %u bytes)\n"
1718		"  ophdrs      = %u (ophdr space = %u bytes)\n"
1719		"  ophdr + reg = %u bytes\n"
1720		"  num regions = %u\n",
1721		((ticket->t_trans_type <= 0 ||
1722		  ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1723		  "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1724		ticket->t_trans_type,
1725		ticket->t_unit_res,
1726		ticket->t_curr_res,
1727		ticket->t_res_arr_sum, ticket->t_res_o_flow,
1728		ticket->t_res_num_ophdrs, ophdr_spc,
1729		ticket->t_res_arr_sum +
1730		ticket->t_res_o_flow + ophdr_spc,
1731		ticket->t_res_num);
1732
1733	for (i = 0; i < ticket->t_res_num; i++) {
1734		uint r_type = ticket->t_res_arr[i].r_type;
1735		xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
1736			    ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1737			    "bad-rtype" : res_type_str[r_type-1]),
1738			    ticket->t_res_arr[i].r_len);
1739	}
1740
1741	xfs_alert_tag(mp, XFS_PTAG_LOGRES,
1742		"xlog_write: reservation ran out. Need to up reservation");
1743	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1744}
1745
1746/*
1747 * Calculate the potential space needed by the log vector.  Each region gets
1748 * its own xlog_op_header_t and may need to be double word aligned.
1749 */
1750static int
1751xlog_write_calc_vec_length(
1752	struct xlog_ticket	*ticket,
1753	struct xfs_log_vec	*log_vector)
1754{
1755	struct xfs_log_vec	*lv;
1756	int			headers = 0;
1757	int			len = 0;
1758	int			i;
1759
1760	/* acct for start rec of xact */
1761	if (ticket->t_flags & XLOG_TIC_INITED)
1762		headers++;
1763
1764	for (lv = log_vector; lv; lv = lv->lv_next) {
1765		headers += lv->lv_niovecs;
1766
1767		for (i = 0; i < lv->lv_niovecs; i++) {
1768			struct xfs_log_iovec	*vecp = &lv->lv_iovecp[i];
1769
1770			len += vecp->i_len;
1771			xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
1772		}
1773	}
1774
1775	ticket->t_res_num_ophdrs += headers;
1776	len += headers * sizeof(struct xlog_op_header);
1777
1778	return len;
1779}
1780
1781/*
 1782 * If first write for transaction, insert start record.  We can't be trying to
1783 * commit if we are inited.  We can't have any "partial_copy" if we are inited.
1784 */
1785static int
1786xlog_write_start_rec(
1787	struct xlog_op_header	*ophdr,
1788	struct xlog_ticket	*ticket)
1789{
1790	if (!(ticket->t_flags & XLOG_TIC_INITED))
1791		return 0;
1792
1793	ophdr->oh_tid	= cpu_to_be32(ticket->t_tid);
1794	ophdr->oh_clientid = ticket->t_clientid;
1795	ophdr->oh_len = 0;
1796	ophdr->oh_flags = XLOG_START_TRANS;
1797	ophdr->oh_res2 = 0;
1798
1799	ticket->t_flags &= ~XLOG_TIC_INITED;
1800
1801	return sizeof(struct xlog_op_header);
1802}
1803
1804static xlog_op_header_t *
1805xlog_write_setup_ophdr(
1806	struct xlog		*log,
1807	struct xlog_op_header	*ophdr,
1808	struct xlog_ticket	*ticket,
1809	uint			flags)
1810{
1811	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
1812	ophdr->oh_clientid = ticket->t_clientid;
1813	ophdr->oh_res2 = 0;
1814
1815	/* are we copying a commit or unmount record? */
1816	ophdr->oh_flags = flags;
1817
1818	/*
1819	 * We've seen logs corrupted with bad transaction client ids.  This
 1820	 * makes sure that XFS doesn't generate them.  Turn this into an EIO
1821	 * and shut down the filesystem.
1822	 */
1823	switch (ophdr->oh_clientid)  {
1824	case XFS_TRANSACTION:
1825	case XFS_VOLUME:
1826	case XFS_LOG:
1827		break;
1828	default:
1829		xfs_warn(log->l_mp,
1830			"Bad XFS transaction clientid 0x%x in ticket 0x%p",
1831			ophdr->oh_clientid, ticket);
1832		return NULL;
1833	}
1834
1835	return ophdr;
1836}
1837
1838/*
1839 * Set up the parameters of the region copy into the log.  This has to
1840 * handle a region write that is split across multiple log buffers - this
1841 * state is kept external to this function so that the code can be
1842 * written in an obvious, self-documenting manner.
1843 */
1844static int
1845xlog_write_setup_copy(
1846	struct xlog_ticket	*ticket,
1847	struct xlog_op_header	*ophdr,
1848	int			space_available,
1849	int			space_required,
1850	int			*copy_off,
1851	int			*copy_len,
1852	int			*last_was_partial_copy,
1853	int			*bytes_consumed)
1854{
1855	int			still_to_copy;
1856
1857	still_to_copy = space_required - *bytes_consumed;
1858	*copy_off = *bytes_consumed;
1859
1860	if (still_to_copy <= space_available) {
1861		/* write of region completes here */
1862		*copy_len = still_to_copy;
1863		ophdr->oh_len = cpu_to_be32(*copy_len);
1864		if (*last_was_partial_copy)
1865			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
1866		*last_was_partial_copy = 0;
1867		*bytes_consumed = 0;
1868		return 0;
1869	}
1870
1871	/* partial write of region, needs extra log op header reservation */
1872	*copy_len = space_available;
1873	ophdr->oh_len = cpu_to_be32(*copy_len);
1874	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
1875	if (*last_was_partial_copy)
1876		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
1877	*bytes_consumed += *copy_len;
1878	(*last_was_partial_copy)++;
1879
1880	/* account for new log op header */
1881	ticket->t_curr_res -= sizeof(struct xlog_op_header);
1882	ticket->t_res_num_ophdrs++;
1883
1884	return sizeof(struct xlog_op_header);
1885}
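/*
 * Worked example (editorial sketch, hypothetical numbers): copying a
 * 600 byte region into an iclog with only 512 bytes left takes two calls:
 *
 *	call 1: still_to_copy = 600 > space_available = 512
 *		-> copy_len = 512, XLOG_CONTINUE_TRANS set,
 *		   bytes_consumed = 512, one extra op header reserved
 *	call 2: still_to_copy = 88 fits in the next iclog
 *		-> copy_len = 88, XLOG_WAS_CONT_TRANS|XLOG_END_TRANS set,
 *		   bytes_consumed and last_was_partial_copy reset to 0
 */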
1886
1887static int
1888xlog_write_copy_finish(
1889	struct xlog		*log,
1890	struct xlog_in_core	*iclog,
1891	uint			flags,
1892	int			*record_cnt,
1893	int			*data_cnt,
1894	int			*partial_copy,
1895	int			*partial_copy_len,
1896	int			log_offset,
1897	struct xlog_in_core	**commit_iclog)
1898{
1899	if (*partial_copy) {
1900		/*
1901		 * This iclog has already been marked WANT_SYNC by
1902		 * xlog_state_get_iclog_space.
1903		 */
1904		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1905		*record_cnt = 0;
1906		*data_cnt = 0;
1907		return xlog_state_release_iclog(log, iclog);
1908	}
1909
1910	*partial_copy = 0;
1911	*partial_copy_len = 0;
1912
1913	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
1914		/* no more space in this iclog - push it. */
1915		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1916		*record_cnt = 0;
1917		*data_cnt = 0;
1918
1919		spin_lock(&log->l_icloglock);
1920		xlog_state_want_sync(log, iclog);
1921		spin_unlock(&log->l_icloglock);
1922
1923		if (!commit_iclog)
1924			return xlog_state_release_iclog(log, iclog);
1925		ASSERT(flags & XLOG_COMMIT_TRANS);
1926		*commit_iclog = iclog;
1927	}
1928
1929	return 0;
1930}
1931
1932/*
1933 * Write some region out to in-core log
1934 *
1935 * This will be called when writing externally provided regions or when
1936 * writing out a commit record for a given transaction.
1937 *
1938 * General algorithm:
1939 *	1. Find total length of this write.  This may include adding to the
1940 *		lengths passed in.
1941 *	2. Check whether we violate the ticket's reservation.
1942 *	3. While writing to this iclog
1943 *	    A. Reserve as much space in this iclog as we can get
1944 *	    B. If this is first write, save away start lsn
1945 *	    C. While writing this region:
1946 *		1. If first write of transaction, write start record
1947 *		2. Write log operation header (header per region)
1948 *		3. Find out if we can fit entire region into this iclog
1949 *		4. Potentially, verify destination memcpy ptr
1950 *		5. Memcpy (partial) region
1951 *		6. If partial copy, release iclog; otherwise, continue
1952 *			copying more regions into current iclog
1953 *	4. Mark want sync bit (in simulation mode)
1954 *	5. Release iclog for potential flush to on-disk log.
1955 *
1956 * ERRORS:
1957 * 1.	Panic if reservation is overrun.  This should never happen since
1958 *	reservation amounts are generated internally by the filesystem.
1959 * NOTES:
1960 * 1. Tickets are single threaded data structures.
1961 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
1962 *	syncing routine.  When a single log_write region needs to span
1963 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
1964 *	on all log operation writes which don't contain the end of the
1965 *	region.  The XLOG_END_TRANS bit is used for the in-core log
1966 *	operation which contains the end of the continued log_write region.
1967 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
1968 *	we don't really know exactly how much space will be used.  As a result,
1969 *	we don't update ic_offset until the end when we know exactly how many
1970 *	bytes have been written out.
1971 */
1972int
1973xlog_write(
1974	struct xlog		*log,
1975	struct xfs_log_vec	*log_vector,
1976	struct xlog_ticket	*ticket,
1977	xfs_lsn_t		*start_lsn,
1978	struct xlog_in_core	**commit_iclog,
1979	uint			flags)
1980{
1981	struct xlog_in_core	*iclog = NULL;
1982	struct xfs_log_iovec	*vecp;
1983	struct xfs_log_vec	*lv;
1984	int			len;
1985	int			index;
1986	int			partial_copy = 0;
1987	int			partial_copy_len = 0;
1988	int			contwr = 0;
1989	int			record_cnt = 0;
1990	int			data_cnt = 0;
1991	int			error;
1992
1993	*start_lsn = 0;
1994
1995	len = xlog_write_calc_vec_length(ticket, log_vector);
1996
1997	/*
1998	 * Region headers and bytes are already accounted for.
1999	 * We only need to take into account start records and
2000	 * split regions in this function.
2001	 */
2002	if (ticket->t_flags & XLOG_TIC_INITED)
2003		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2004
2005	/*
2006	 * Commit record headers need to be accounted for. These
2007	 * come in as separate writes so are easy to detect.
2008	 */
2009	if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2010		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2011
2012	if (ticket->t_curr_res < 0)
2013		xlog_print_tic_res(log->l_mp, ticket);
2014
2015	index = 0;
2016	lv = log_vector;
2017	vecp = lv->lv_iovecp;
2018	while (lv && index < lv->lv_niovecs) {
2019		void		*ptr;
2020		int		log_offset;
2021
2022		error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2023						   &contwr, &log_offset);
2024		if (error)
2025			return error;
2026
2027		ASSERT(log_offset <= iclog->ic_size - 1);
2028		ptr = iclog->ic_datap + log_offset;
2029
2030		/* start_lsn is the first lsn written to. That's all we need. */
2031		if (!*start_lsn)
2032			*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2033
2034		/*
2035		 * This loop writes out as many regions as can fit in the amount
2036		 * of space which was allocated by xlog_state_get_iclog_space().
2037		 */
2038		while (lv && index < lv->lv_niovecs) {
2039			struct xfs_log_iovec	*reg = &vecp[index];
2040			struct xlog_op_header	*ophdr;
2041			int			start_rec_copy;
2042			int			copy_len;
2043			int			copy_off;
2044
2045			ASSERT(reg->i_len % sizeof(__int32_t) == 0);
2046			ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
2047
2048			start_rec_copy = xlog_write_start_rec(ptr, ticket);
2049			if (start_rec_copy) {
2050				record_cnt++;
2051				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2052						   start_rec_copy);
2053			}
2054
2055			ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2056			if (!ophdr)
2057				return XFS_ERROR(EIO);
2058
2059			xlog_write_adv_cnt(&ptr, &len, &log_offset,
2060					   sizeof(struct xlog_op_header));
2061
2062			len += xlog_write_setup_copy(ticket, ophdr,
2063						     iclog->ic_size-log_offset,
2064						     reg->i_len,
2065						     &copy_off, &copy_len,
2066						     &partial_copy,
2067						     &partial_copy_len);
2068			xlog_verify_dest_ptr(log, ptr);
2069
2070			/* copy region */
2071			ASSERT(copy_len >= 0);
2072			memcpy(ptr, reg->i_addr + copy_off, copy_len);
2073			xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
2074
2075			copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2076			record_cnt++;
2077			data_cnt += contwr ? copy_len : 0;
2078
2079			error = xlog_write_copy_finish(log, iclog, flags,
2080						       &record_cnt, &data_cnt,
2081						       &partial_copy,
2082						       &partial_copy_len,
2083						       log_offset,
2084						       commit_iclog);
2085			if (error)
2086				return error;
2087
2088			/*
2089			 * if we had a partial copy, we need to get more iclog
2090			 * space but we don't want to increment the region
2091			 * index because there is still more in this region to
2092			 * write.
2093			 *
2094			 * If we completed writing this region, and we flushed
2095			 * the iclog (indicated by resetting of the record
2096			 * count), then we also need to get more log space. If
2097			 * this was the last record, though, we are done and
2098			 * can just return.
2099			 */
2100			if (partial_copy)
2101				break;
2102
2103			if (++index == lv->lv_niovecs) {
2104				lv = lv->lv_next;
2105				index = 0;
2106				if (lv)
2107					vecp = lv->lv_iovecp;
2108			}
2109			if (record_cnt == 0) {
2110				if (!lv)
2111					return 0;
2112				break;
2113			}
2114		}
2115	}
2116
2117	ASSERT(len == 0);
2118
2119	xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2120	if (!commit_iclog)
2121		return xlog_state_release_iclog(log, iclog);
2122
2123	ASSERT(flags & XLOG_COMMIT_TRANS);
2124	*commit_iclog = iclog;
2125	return 0;
2126}
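/*
 * Editorial sketch (hypothetical caller, not part of the original file):
 * a caller such as the CIL push code builds a chain of log vectors and
 * hands the whole chain to xlog_write() in one call, roughly like this:
 */
#if 0	/* illustrative only, never compiled */
static int
example_write_chain(
	struct xlog		*log,
	struct xfs_log_vec	*lv_chain,
	struct xlog_ticket	*tic)
{
	xfs_lsn_t	start_lsn;

	/* write every chained vector; no commit record wanted yet */
	return xlog_write(log, lv_chain, tic, &start_lsn, NULL, 0);
}
#endif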
2127
2128
2129/*****************************************************************************
2130 *
2131 *		State Machine functions
2132 *
2133 *****************************************************************************
2134 */
2135
2136/* Clean iclogs starting from the head.  This ordering must be
2137 * maintained, so an iclog doesn't become ACTIVE beyond one that
2138 * is SYNCING.  This is also required to maintain the notion that we use
2139 * an ordered wait queue to hold off would-be writers to the log when every
2140 * iclog is trying to sync to disk.
2141 *
2142 * State Change: DIRTY -> ACTIVE
2143 */
2144STATIC void
2145xlog_state_clean_log(xlog_t *log)
2146{
2147	xlog_in_core_t	*iclog;
2148	int changed = 0;
2149
2150	iclog = log->l_iclog;
2151	do {
2152		if (iclog->ic_state == XLOG_STATE_DIRTY) {
2153			iclog->ic_state	= XLOG_STATE_ACTIVE;
2154			iclog->ic_offset       = 0;
2155			ASSERT(iclog->ic_callback == NULL);
2156			/*
2157			 * If the number of ops in this iclog indicates it just
2158			 * contains the dummy transaction, we can
2159			 * change state into IDLE (the second time around).
2160			 * Otherwise we should change the state into
2161			 * NEED a dummy.
2162			 * We don't need to cover the dummy.
2163			 */
2164			if (!changed &&
2165			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2166			   		XLOG_COVER_OPS)) {
2167				changed = 1;
2168			} else {
2169				/*
2170				 * We have two dirty iclogs so start over
2171				 * This could also be num of ops indicates
2172				 * this is not the dummy going out.
2173				 */
2174				changed = 2;
2175			}
2176			iclog->ic_header.h_num_logops = 0;
2177			memset(iclog->ic_header.h_cycle_data, 0,
2178			      sizeof(iclog->ic_header.h_cycle_data));
2179			iclog->ic_header.h_lsn = 0;
2180		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2181			/* do nothing */;
2182		else
2183			break;	/* stop cleaning */
2184		iclog = iclog->ic_next;
2185	} while (iclog != log->l_iclog);
2186
2187	/* log is locked when we are called */
2188	/*
2189	 * Change state for the dummy log recording.
2190	 * We usually go to NEED.  But we go to NEED2 if 'changed' indicates
2191	 * we are done writing the dummy record.
2192	 * If we are done with the second dummy record (DONE2), then
2193	 * we go to IDLE.
2194	 */
2195	if (changed) {
2196		switch (log->l_covered_state) {
2197		case XLOG_STATE_COVER_IDLE:
2198		case XLOG_STATE_COVER_NEED:
2199		case XLOG_STATE_COVER_NEED2:
2200			log->l_covered_state = XLOG_STATE_COVER_NEED;
2201			break;
2202
2203		case XLOG_STATE_COVER_DONE:
2204			if (changed == 1)
2205				log->l_covered_state = XLOG_STATE_COVER_NEED2;
2206			else
2207				log->l_covered_state = XLOG_STATE_COVER_NEED;
2208			break;
2209
2210		case XLOG_STATE_COVER_DONE2:
2211			if (changed == 1)
2212				log->l_covered_state = XLOG_STATE_COVER_IDLE;
2213			else
2214				log->l_covered_state = XLOG_STATE_COVER_NEED;
2215			break;
2216
2217		default:
2218			ASSERT(0);
2219		}
2220	}
2221}	/* xlog_state_clean_log */
2222
2223STATIC xfs_lsn_t
2224xlog_get_lowest_lsn(
2225	xlog_t		*log)
2226{
2227	xlog_in_core_t  *lsn_log;
2228	xfs_lsn_t	lowest_lsn, lsn;
2229
2230	lsn_log = log->l_iclog;
2231	lowest_lsn = 0;
2232	do {
2233	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2234		lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2235		if ((lsn && !lowest_lsn) ||
2236		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2237			lowest_lsn = lsn;
2238		}
2239	    }
2240	    lsn_log = lsn_log->ic_next;
2241	} while (lsn_log != log->l_iclog);
2242	return lowest_lsn;
2243}
2244
2245
2246STATIC void
2247xlog_state_do_callback(
2248	xlog_t		*log,
2249	int		aborted,
2250	xlog_in_core_t	*ciclog)
2251{
2252	xlog_in_core_t	   *iclog;
2253	xlog_in_core_t	   *first_iclog;	/* used to know when we've
2254						 * processed all iclogs once */
2255	xfs_log_callback_t *cb, *cb_next;
2256	int		   flushcnt = 0;
2257	xfs_lsn_t	   lowest_lsn;
2258	int		   ioerrors;	/* counter: iclogs with errors */
2259	int		   loopdidcallbacks; /* flag: inner loop did callbacks*/
2260	int		   funcdidcallbacks; /* flag: function did callbacks */
2261	int		   repeats;	/* for issuing console warnings if
2262					 * looping too many times */
2263	int		   wake = 0;
2264
2265	spin_lock(&log->l_icloglock);
2266	first_iclog = iclog = log->l_iclog;
2267	ioerrors = 0;
2268	funcdidcallbacks = 0;
2269	repeats = 0;
2270
2271	do {
2272		/*
2273		 * Scan all iclogs starting with the one pointed to by the
2274		 * log.  Reset this starting point each time the log is
2275		 * unlocked (during callbacks).
2276		 *
2277		 * Keep looping through iclogs until one full pass is made
2278		 * without running any callbacks.
2279		 */
2280		first_iclog = log->l_iclog;
2281		iclog = log->l_iclog;
2282		loopdidcallbacks = 0;
2283		repeats++;
2284
2285		do {
2286
2287			/* skip all iclogs in the ACTIVE & DIRTY states */
2288			if (iclog->ic_state &
2289			    (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2290				iclog = iclog->ic_next;
2291				continue;
2292			}
2293
2294			/*
2295			 * Between marking a filesystem SHUTDOWN and stopping
2296			 * the log, we do flush all iclogs to disk (if there
2297			 * wasn't a log I/O error). So, we do want things to
2298			 * go smoothly in case of just a SHUTDOWN  w/o a
2299			 * LOG_IO_ERROR.
2300			 */
2301			if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2302				/*
2303				 * Can only perform callbacks in order.  Since
2304				 * this iclog is not in the DONE_SYNC/
2305				 * DO_CALLBACK state, we skip the rest and
2306				 * just try to clean up.  If we set our iclog
2307				 * to DO_CALLBACK, we will not process it when
2308				 * we retry since a previous iclog is in the
2309				 * CALLBACK and the state cannot change since
2310				 * we are holding the l_icloglock.
2311				 */
2312				if (!(iclog->ic_state &
2313					(XLOG_STATE_DONE_SYNC |
2314						 XLOG_STATE_DO_CALLBACK))) {
2315					if (ciclog && (ciclog->ic_state ==
2316							XLOG_STATE_DONE_SYNC)) {
2317						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2318					}
2319					break;
2320				}
2321				/*
2322				 * We now have an iclog that is in either the
2323				 * DO_CALLBACK or DONE_SYNC states.  The other
2324				 * states (WANT_SYNC, SYNCING, and CALLBACK) were
2325				 * caught by the check above and are left for
2326				 * other threads to clean up; we aren't doing
2327				 * their callbacks here.
2328				 */
2329
2330				/*
2331				 * We will do one more check here to see if we
2332				 * have chased our tail around.
2333				 */
2334
2335				lowest_lsn = xlog_get_lowest_lsn(log);
2336				if (lowest_lsn &&
2337				    XFS_LSN_CMP(lowest_lsn,
2338						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2339					iclog = iclog->ic_next;
2340					continue; /* Leave this iclog for
2341						   * another thread */
2342				}
2343
2344				iclog->ic_state = XLOG_STATE_CALLBACK;
2345
2346
2347				/*
2348				 * update the last_sync_lsn before we drop the
2349				 * icloglock to ensure we are the only one that
2350				 * can update it.
2351				 */
2352				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2353					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2354				atomic64_set(&log->l_last_sync_lsn,
2355					be64_to_cpu(iclog->ic_header.h_lsn));
2356
2357			} else
2358				ioerrors++;
2359
2360			spin_unlock(&log->l_icloglock);
2361
2362			/*
2363			 * Keep processing entries in the callback list until
2364			 * we come around and it is empty.  We need to
2365			 * atomically see that the list is empty and change the
2366			 * state to DIRTY so that we don't miss any more
2367			 * callbacks being added.
2368			 */
2369			spin_lock(&iclog->ic_callback_lock);
2370			cb = iclog->ic_callback;
2371			while (cb) {
2372				iclog->ic_callback_tail = &(iclog->ic_callback);
2373				iclog->ic_callback = NULL;
2374				spin_unlock(&iclog->ic_callback_lock);
2375
2376				/* perform callbacks in the order given */
2377				for (; cb; cb = cb_next) {
2378					cb_next = cb->cb_next;
2379					cb->cb_func(cb->cb_arg, aborted);
2380				}
2381				spin_lock(&iclog->ic_callback_lock);
2382				cb = iclog->ic_callback;
2383			}
2384
2385			loopdidcallbacks++;
2386			funcdidcallbacks++;
2387
2388			spin_lock(&log->l_icloglock);
2389			ASSERT(iclog->ic_callback == NULL);
2390			spin_unlock(&iclog->ic_callback_lock);
2391			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2392				iclog->ic_state = XLOG_STATE_DIRTY;
2393
2394			/*
2395			 * Transition from DIRTY to ACTIVE if applicable.
2396			 * NOP if STATE_IOERROR.
2397			 */
2398			xlog_state_clean_log(log);
2399
2400			/* wake up threads waiting in xfs_log_force() */
2401			wake_up_all(&iclog->ic_force_wait);
2402
2403			iclog = iclog->ic_next;
2404		} while (first_iclog != iclog);
2405
2406		if (repeats > 5000) {
2407			flushcnt += repeats;
2408			repeats = 0;
2409			xfs_warn(log->l_mp,
2410				"%s: possible infinite loop (%d iterations)",
2411				__func__, flushcnt);
2412		}
2413	} while (!ioerrors && loopdidcallbacks);
2414
2415	/*
2416	 * make one last gasp attempt to see if iclogs are being left in
2417	 * limbo...
2418	 */
2419#ifdef DEBUG
2420	if (funcdidcallbacks) {
2421		first_iclog = iclog = log->l_iclog;
2422		do {
2423			ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2424			/*
2425			 * Terminate the loop if iclogs are found in states
2426			 * which will cause other threads to clean up iclogs.
2427			 *
2428			 * SYNCING - i/o completion will go through logs
2429			 * DONE_SYNC - interrupt thread should be waiting for
2430			 *              l_icloglock
2431			 * IOERROR - give up hope all ye who enter here
2432			 */
2433			if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2434			    iclog->ic_state == XLOG_STATE_SYNCING ||
2435			    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2436			    iclog->ic_state == XLOG_STATE_IOERROR )
2437				break;
2438			iclog = iclog->ic_next;
2439		} while (first_iclog != iclog);
2440	}
2441#endif
2442
2443	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2444		wake = 1;
2445	spin_unlock(&log->l_icloglock);
2446
2447	if (wake)
2448		wake_up_all(&log->l_flush_wait);
2449}
2450
2451
2452/*
2453 * Finish transitioning this iclog to the dirty state.
2454 *
2455 * Make sure that we completely execute this routine only when this is
2456 * the last call to the iclog.  There is a good chance that iclog flushes,
2457 * when we reach the end of the physical log, get turned into 2 separate
2458 * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2459 * routine.  By using the reference count bwritecnt, we guarantee that only
2460 * the second completion goes through.
2461 *
2462 * Callbacks could take time, so they are done outside the scope of the
2463 * global state machine log lock.
2464 */
2465STATIC void
2466xlog_state_done_syncing(
2467	xlog_in_core_t	*iclog,
2468	int		aborted)
2469{
2470	xlog_t		   *log = iclog->ic_log;
2471
2472	spin_lock(&log->l_icloglock);
2473
2474	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2475	       iclog->ic_state == XLOG_STATE_IOERROR);
2476	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2477	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2478
2479
2480	/*
2481	 * If we got an error, either on the first buffer, or in the case of
2482	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2483	 * and none should ever be attempted to be written to disk
2484	 * again.
2485	 */
2486	if (iclog->ic_state != XLOG_STATE_IOERROR) {
2487		if (--iclog->ic_bwritecnt == 1) {
2488			spin_unlock(&log->l_icloglock);
2489			return;
2490		}
2491		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2492	}
2493
2494	/*
2495	 * Someone could be sleeping prior to writing out the next
2496	 * iclog buffer; we wake them all.  One will get to do the
2497	 * I/O, the others get to wait for the result.
2498	 */
2499	wake_up_all(&iclog->ic_write_wait);
2500	spin_unlock(&log->l_icloglock);
2501	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
2502}	/* xlog_state_done_syncing */
2503
2504
2505/*
2506 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2507 * sleep.  We wait on the flush queue on the head iclog as that should be
2508 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2509 * we will wait here and all new writes will sleep until a sync completes.
2510 *
2511 * The in-core logs are used in a circular fashion. They are not used
2512 * out-of-order even when an iclog past the head is free.
2513 *
2514 * return:
2515 *	* log_offset where xlog_write() can start writing into the in-core
2516 *		log's data space.
2517 *	* in-core log pointer to which xlog_write() should write.
2518 *	* boolean indicating this is a continued write to an in-core log.
2519 *		If this is the last write, then the in-core log's offset field
2520 *		needs to be incremented, depending on the amount of data which
2521 *		is copied.
2522 */
2523STATIC int
2524xlog_state_get_iclog_space(xlog_t	  *log,
2525			   int		  len,
2526			   xlog_in_core_t **iclogp,
2527			   xlog_ticket_t  *ticket,
2528			   int		  *continued_write,
2529			   int		  *logoffsetp)
2530{
2531	int		  log_offset;
2532	xlog_rec_header_t *head;
2533	xlog_in_core_t	  *iclog;
2534	int		  error;
2535
2536restart:
2537	spin_lock(&log->l_icloglock);
2538	if (XLOG_FORCED_SHUTDOWN(log)) {
2539		spin_unlock(&log->l_icloglock);
2540		return XFS_ERROR(EIO);
2541	}
2542
2543	iclog = log->l_iclog;
2544	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2545		XFS_STATS_INC(xs_log_noiclogs);
2546
2547		/* Wait for log writes to have flushed */
2548		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2549		goto restart;
2550	}
2551
2552	head = &iclog->ic_header;
2553
2554	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
2555	log_offset = iclog->ic_offset;
2556
2557	/* On the 1st write to an iclog, figure out lsn.  This works
2558	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2559	 * committing to.  If the offset is set, that's how many blocks
2560	 * must be written.
2561	 */
2562	if (log_offset == 0) {
2563		ticket->t_curr_res -= log->l_iclog_hsize;
2564		xlog_tic_add_region(ticket,
2565				    log->l_iclog_hsize,
2566				    XLOG_REG_TYPE_LRHEADER);
2567		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2568		head->h_lsn = cpu_to_be64(
2569			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2570		ASSERT(log->l_curr_block >= 0);
2571	}
2572
2573	/* If there is enough room to write everything, then do it.  Otherwise,
2574	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2575	 * bit is on, so this will get flushed out.  Don't update ic_offset
2576	 * until we know exactly how many bytes get copied; it is updated
2577	 * later, once the copy sizes are final.
2578	 *
2579	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2580	 * can fit into remaining data section.
2581	 */
2582	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2583		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2584
2585		/*
2586		 * If I'm the only one writing to this iclog, sync it to disk.
2587		 * We need to do an atomic compare and decrement here to avoid
2588		 * racing with concurrent atomic_dec_and_lock() calls in
2589		 * xlog_state_release_iclog() when there is more than one
2590		 * reference to the iclog.
2591		 */
2592		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
2593			/* we are the only one */
2594			spin_unlock(&log->l_icloglock);
2595			error = xlog_state_release_iclog(log, iclog);
2596			if (error)
2597				return error;
2598		} else {
2599			spin_unlock(&log->l_icloglock);
2600		}
2601		goto restart;
2602	}
2603
2604	/* Do we have enough room to write the full amount in the remainder
2605	 * of this iclog?  Or must we continue a write on the next iclog and
2606	 * mark this iclog as completely taken?  In the case where we switch
2607	 * iclogs (to mark it taken), this particular iclog will release/sync
2608	 * to disk in xlog_write().
2609	 */
2610	if (len <= iclog->ic_size - iclog->ic_offset) {
2611		*continued_write = 0;
2612		iclog->ic_offset += len;
2613	} else {
2614		*continued_write = 1;
2615		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2616	}
2617	*iclogp = iclog;
2618
2619	ASSERT(iclog->ic_offset <= iclog->ic_size);
2620	spin_unlock(&log->l_icloglock);
2621
2622	*logoffsetp = log_offset;
2623	return 0;
2624}	/* xlog_state_get_iclog_space */
2625
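/*
 * Worked example (editorial sketch, hypothetical numbers, assuming the
 * 12 byte on-disk op header): with a 32KB iclog and ic_offset = 32745,
 * only 23 bytes remain - less than the 2 * sizeof(xlog_op_header_t) = 24
 * bytes the check above requires - so the iclog is switched to WANT_SYNC
 * and the caller loops back to restart on a fresh ACTIVE iclog.
 */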
2626/* The first cnt-1 times through here we don't need to
2627 * move the grant write head because the permanent
2628 * reservation has reserved cnt times the unit amount.
2629 * Release part of current permanent unit reservation and
2630 * reset current reservation to be one unit's worth.  Also
2631 * move grant reservation head forward.
2632 */
2633STATIC void
2634xlog_regrant_reserve_log_space(xlog_t	     *log,
2635			       xlog_ticket_t *ticket)
2636{
2637	trace_xfs_log_regrant_reserve_enter(log, ticket);
2638
2639	if (ticket->t_cnt > 0)
2640		ticket->t_cnt--;
2641
2642	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
2643					ticket->t_curr_res);
2644	xlog_grant_sub_space(log, &log->l_write_head.grant,
2645					ticket->t_curr_res);
2646	ticket->t_curr_res = ticket->t_unit_res;
2647	xlog_tic_reset_res(ticket);
2648
2649	trace_xfs_log_regrant_reserve_sub(log, ticket);
2650
2651	/* just return if we still have some of the pre-reserved space */
2652	if (ticket->t_cnt > 0)
2653		return;
2654
2655	xlog_grant_add_space(log, &log->l_reserve_head.grant,
2656					ticket->t_unit_res);
2657
2658	trace_xfs_log_regrant_reserve_exit(log, ticket);
2659
2660	ticket->t_curr_res = ticket->t_unit_res;
2661	xlog_tic_reset_res(ticket);
2662}	/* xlog_regrant_reserve_log_space */
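/*
 * Worked example (editorial sketch, hypothetical numbers): a permanent
 * ticket with t_cnt = 2 and t_unit_res = 4096 that has 3396 bytes of its
 * current unit left gives those 3396 bytes back to both grant heads,
 * resets t_curr_res to a full 4096, and - because one pre-reserved count
 * remains after the decrement - returns before re-adding a unit to the
 * reserve head.
 */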
2663
2664
2665/*
2666 * Give back the space left from a reservation.
2667 *
2668 * All the information we need to make a correct determination of space left
2669 * is present.  For non-permanent reservations, things are quite easy.  The
2670 * count should have been decremented to zero.  We only need to deal with the
2671 * space remaining in the current reservation part of the ticket.  If the
2672 * ticket contains a permanent reservation, there may be left over space which
2673 * needs to be released.  A count of N means that N-1 refills of the current
2674 * reservation can be done before we need to ask for more space.  The first
2675 * one goes to fill up the first current reservation.  Once we run out of
2676 * space, the count will stay at zero and the only space remaining will be
2677 * in the current reservation field.
2678 */
2679STATIC void
2680xlog_ungrant_log_space(xlog_t	     *log,
2681		       xlog_ticket_t *ticket)
2682{
2683	int	bytes;
2684
2685	if (ticket->t_cnt > 0)
2686		ticket->t_cnt--;
2687
2688	trace_xfs_log_ungrant_enter(log, ticket);
2689	trace_xfs_log_ungrant_sub(log, ticket);
2690
2691	/*
2692	 * If this is a permanent reservation ticket, we may be able to free
2693	 * up more space based on the remaining count.
2694	 */
2695	bytes = ticket->t_curr_res;
2696	if (ticket->t_cnt > 0) {
2697		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2698		bytes += ticket->t_unit_res*ticket->t_cnt;
2699	}
2700
2701	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
2702	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
2703
2704	trace_xfs_log_ungrant_exit(log, ticket);
2705
2706	xfs_log_space_wake(log->l_mp);
2707}
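/*
 * Worked example (editorial sketch, hypothetical numbers): ungranting a
 * permanent ticket with t_cnt = 2, t_unit_res = 4096 and t_curr_res = 1000
 * releases
 *
 *	bytes = 1000 + 1 * 4096 = 5096
 *
 * (one count is consumed by the decrement above before the remaining
 * refills are summed), and both grant heads retreat by that amount.
 */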
2708
2709/*
2710 * Flush iclog to disk if this is the last reference to the given iclog and
2711 * the WANT_SYNC bit is set.
2712 *
2713 * When this function is entered, the iclog is not necessarily in the
2714 * WANT_SYNC state.  It may be sitting around waiting to get filled.
2715 */
2718STATIC int
2719xlog_state_release_iclog(
2720	xlog_t		*log,
2721	xlog_in_core_t	*iclog)
2722{
2723	int		sync = 0;	/* do we sync? */
2724
2725	if (iclog->ic_state & XLOG_STATE_IOERROR)
2726		return XFS_ERROR(EIO);
2727
2728	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
2729	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
2730		return 0;
2731
2732	if (iclog->ic_state & XLOG_STATE_IOERROR) {
2733		spin_unlock(&log->l_icloglock);
2734		return XFS_ERROR(EIO);
2735	}
2736	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
2737	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
2738
2739	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
2740		/* update tail before writing to iclog */
2741		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
2742		sync++;
2743		iclog->ic_state = XLOG_STATE_SYNCING;
2744		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
2745		xlog_verify_tail_lsn(log, iclog, tail_lsn);
2746		/* cycle incremented when incrementing curr_block */
2747	}
2748	spin_unlock(&log->l_icloglock);
2749
2750	/*
2751	 * We let the log lock go, so it's possible that we hit a log I/O
2752	 * error or some other SHUTDOWN condition that marks the iclog
2753	 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
2754	 * this iclog has consistent data, so we ignore IOERROR
2755	 * flags after this point.
2756	 */
2757	if (sync)
2758		return xlog_sync(log, iclog);
2759	return 0;
2760}	/* xlog_state_release_iclog */
2761
2762
2763/*
2764 * This routine will mark the current iclog in the ring as WANT_SYNC
2765 * and move the current iclog pointer to the next iclog in the ring.
2766 * When this routine is called from xlog_state_get_iclog_space(), the
2767 * exact size of the iclog has not yet been determined.  All we know is
2768 * that we have run out of space in this log record.
2769 */
2770STATIC void
2771xlog_state_switch_iclogs(xlog_t		*log,
2772			 xlog_in_core_t *iclog,
2773			 int		eventual_size)
2774{
2775	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2776	if (!eventual_size)
2777		eventual_size = iclog->ic_offset;
2778	iclog->ic_state = XLOG_STATE_WANT_SYNC;
2779	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2780	log->l_prev_block = log->l_curr_block;
2781	log->l_prev_cycle = log->l_curr_cycle;
2782
2783	/* roll log?: ic_offset changed later */
2784	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2785
2786	/* Round up to next log-sunit */
2787	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
2788	    log->l_mp->m_sb.sb_logsunit > 1) {
2789		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
2790		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2791	}
2792
2793	if (log->l_curr_block >= log->l_logBBsize) {
2794		log->l_curr_cycle++;
2795		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2796			log->l_curr_cycle++;
2797		log->l_curr_block -= log->l_logBBsize;
2798		ASSERT(log->l_curr_block >= 0);
2799	}
2800	ASSERT(iclog == log->l_iclog);
2801	log->l_iclog = iclog->ic_next;
2802}	/* xlog_state_switch_iclogs */
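/*
 * Worked example (editorial sketch, hypothetical numbers): with a 4KB log
 * stripe unit (sunit_bb = 8 basic blocks), an iclog carrying 5120 bytes of
 * data behind a 512 byte header advances l_curr_block by
 * BTOBB(5120) + BTOBB(512) = 11 blocks, which roundup() then pads out to
 * the next multiple of 8 - 16 blocks past a previously aligned record.
 */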
2803
2804/*
2805 * Write out all data in the in-core log as of this exact moment in time.
2806 *
2807 * Data may be written to the in-core log during this call.  However,
2808 * we don't guarantee this data will be written out.  A change from past
2809 * implementation means this routine will *not* write out zero length LRs.
2810 *
2811 * Basically, we try and perform an intelligent scan of the in-core logs.
2812 * If we determine there is no flushable data, we just return.  There is no
2813 * flushable data if:
2814 *
2815 *	1. the current iclog is active and has no data; the previous iclog
2816 *		is in the active or dirty state.
2817 *	2. the current iclog is dirty, and the previous iclog is in the
2818 *		active or dirty state.
2819 *
2820 * We may sleep if:
2821 *
2822 *	1. the current iclog is not in the active nor dirty state.
2823 *	2. the current iclog is dirty, and the previous iclog is not in the
2824 *		active nor dirty state.
2825 *	3. the current iclog is active, and there is another thread writing
2826 *		to this particular iclog.
2827 *	4. a) the current iclog is active and has no other writers
2828 *	   b) when we return from flushing out this iclog, it is still
2829 *		not in the active nor dirty state.
2830 */
2831int
2832_xfs_log_force(
2833	struct xfs_mount	*mp,
2834	uint			flags,
2835	int			*log_flushed)
2836{
2837	struct xlog		*log = mp->m_log;
2838	struct xlog_in_core	*iclog;
2839	xfs_lsn_t		lsn;
2840
2841	XFS_STATS_INC(xs_log_force);
2842
2843	xlog_cil_force(log);
2844
2845	spin_lock(&log->l_icloglock);
2846
2847	iclog = log->l_iclog;
2848	if (iclog->ic_state & XLOG_STATE_IOERROR) {
2849		spin_unlock(&log->l_icloglock);
2850		return XFS_ERROR(EIO);
2851	}
2852
2853	/* If the head iclog is not active nor dirty, we just attach
2854	 * ourselves to the head and go to sleep.
2855	 */
2856	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2857	    iclog->ic_state == XLOG_STATE_DIRTY) {
2858		/*
2859		 * If the head is dirty or (active and empty), then
2860		 * we need to look at the previous iclog.  If the previous
2861		 * iclog is active or dirty we are done.  There is nothing
2862		 * to sync out.  Otherwise, we attach ourselves to the
2863		 * previous iclog and go to sleep.
2864		 */
2865		if (iclog->ic_state == XLOG_STATE_DIRTY ||
2866		    (atomic_read(&iclog->ic_refcnt) == 0
2867		     && iclog->ic_offset == 0)) {
2868			iclog = iclog->ic_prev;
2869			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2870			    iclog->ic_state == XLOG_STATE_DIRTY)
2871				goto no_sleep;
2872			else
2873				goto maybe_sleep;
2874		} else {
2875			if (atomic_read(&iclog->ic_refcnt) == 0) {
2876				/* We are the only one with access to this
2877				 * iclog.  Flush it out now.  There should
2878				 * be a roundoff of zero to show that someone
2879				 * has already taken care of the roundoff from
2880				 * the previous sync.
2881				 */
2882				atomic_inc(&iclog->ic_refcnt);
2883				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2884				xlog_state_switch_iclogs(log, iclog, 0);
2885				spin_unlock(&log->l_icloglock);
2886
2887				if (xlog_state_release_iclog(log, iclog))
2888					return XFS_ERROR(EIO);
2889
2890				if (log_flushed)
2891					*log_flushed = 1;
2892				spin_lock(&log->l_icloglock);
2893				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
2894				    iclog->ic_state != XLOG_STATE_DIRTY)
2895					goto maybe_sleep;
2896				else
2897					goto no_sleep;
2898			} else {
2899				/* Someone else is writing to this iclog.
2900				 * Use its call to flush out the data.  However,
2901				 * the other thread may not force out this LR,
2902				 * so we mark it WANT_SYNC.
2903				 */
2904				xlog_state_switch_iclogs(log, iclog, 0);
2905				goto maybe_sleep;
2906			}
2907		}
2908	}
2909
2910	/* By the time we come around again, the iclog could've been filled
2911	 * which would give it another lsn.  If we have a new lsn, just
2912	 * return because the relevant data has been flushed.
2913	 */
2914maybe_sleep:
2915	if (flags & XFS_LOG_SYNC) {
2916		/*
2917		 * We must check if we're shutting down here, before
2918		 * we wait, while we're holding the l_icloglock.
2919		 * Then we check again after waking up, in case our
2920		 * sleep was disturbed by bad news.
2921		 */
2922		if (iclog->ic_state & XLOG_STATE_IOERROR) {
2923			spin_unlock(&log->l_icloglock);
2924			return XFS_ERROR(EIO);
2925		}
2926		XFS_STATS_INC(xs_log_force_sleep);
2927		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
2928		/*
2929		 * No need to grab the log lock here since we're
2930		 * only deciding whether or not to return EIO
2931		 * and the memory read should be atomic.
2932		 */
2933		if (iclog->ic_state & XLOG_STATE_IOERROR)
2934			return XFS_ERROR(EIO);
2935		if (log_flushed)
2936			*log_flushed = 1;
2937	} else {
2938
2939no_sleep:
2940		spin_unlock(&log->l_icloglock);
2941	}
2942	return 0;
2943}
2944
2945/*
2946 * Wrapper for _xfs_log_force(), to be used when caller doesn't care
2947 * about errors or whether the log was flushed or not. This is the normal
2948 * interface to use when trying to unpin items or move the log forward.
2949 */
2950void
2951xfs_log_force(
2952	xfs_mount_t	*mp,
2953	uint		flags)
2954{
2955	int	error;
2956
2957	trace_xfs_log_force(mp, 0);
2958	error = _xfs_log_force(mp, flags, NULL);
2959	if (error)
2960		xfs_warn(mp, "%s: error %d returned.", __func__, error);
2961}
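/*
 * Editorial sketch (hypothetical caller, not part of the original file):
 * unpin-style paths typically fire an asynchronous push, while
 * fsync-style paths ask for a synchronous flush:
 */
#if 0	/* illustrative only, never compiled */
static void
example_force(
	struct xfs_mount	*mp)
{
	xfs_log_force(mp, 0);			/* async: start iclog I/O */
	xfs_log_force(mp, XFS_LOG_SYNC);	/* sync: wait for the I/O */
}
#endif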
2962
2963/*
2964 * Force the in-core log to disk for a specific LSN.
2965 *
2966 * Find in-core log with lsn.
2967 *	If it is in the DIRTY state, just return.
2968 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2969 *		state and go to sleep or return.
2970 *	If it is in any other state, go to sleep or return.
2971 *
2972 * Synchronous forces are implemented with a signal variable. All callers
2973 * to force a given lsn to disk will wait on the sv attached to the
2974 * specific in-core log.  When the given in-core log finally completes its
2975 * write to disk, that thread will wake up all threads waiting on the
2976 * sv.
2977 */
2978int
2979_xfs_log_force_lsn(
2980	struct xfs_mount	*mp,
2981	xfs_lsn_t		lsn,
2982	uint			flags,
2983	int			*log_flushed)
2984{
2985	struct xlog		*log = mp->m_log;
2986	struct xlog_in_core	*iclog;
2987	int			already_slept = 0;
2988
2989	ASSERT(lsn != 0);
2990
2991	XFS_STATS_INC(xs_log_force);
2992
2993	lsn = xlog_cil_force_lsn(log, lsn);
2994	if (lsn == NULLCOMMITLSN)
2995		return 0;
2996
2997try_again:
2998	spin_lock(&log->l_icloglock);
2999	iclog = log->l_iclog;
3000	if (iclog->ic_state & XLOG_STATE_IOERROR) {
3001		spin_unlock(&log->l_icloglock);
3002		return XFS_ERROR(EIO);
3003	}
3004
3005	do {
3006		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3007			iclog = iclog->ic_next;
3008			continue;
3009		}
3010
3011		if (iclog->ic_state == XLOG_STATE_DIRTY) {
3012			spin_unlock(&log->l_icloglock);
3013			return 0;
3014		}
3015
3016		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3017			/*
3018			 * We sleep here if we haven't already slept (e.g.
3019			 * this is the first time we've looked at the correct
3020			 * iclog buf) and the buffer before us is going to
3021			 * be sync'ed. The reason for this is that if we
3022			 * are doing sync transactions here, by waiting for
3023			 * the previous I/O to complete, we can allow a few
3024			 * more transactions into this iclog before we close
3025			 * it down.
3026			 *
3027			 * Otherwise, we mark the buffer WANT_SYNC, and bump
3028			 * up the refcnt so we can release the log (which
3029			 * drops the ref count).  The state switch keeps new
3030			 * transaction commits from using this buffer.  When
3031			 * the current commits finish writing into the buffer,
3032			 * the refcount will drop to zero and the buffer will
3033			 * go out then.
3034			 */
3035			if (!already_slept &&
3036			    (iclog->ic_prev->ic_state &
3037			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3038				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3039
3040				XFS_STATS_INC(xs_log_force_sleep);
3041
3042				xlog_wait(&iclog->ic_prev->ic_write_wait,
3043							&log->l_icloglock);
3044				if (log_flushed)
3045					*log_flushed = 1;
3046				already_slept = 1;
3047				goto try_again;
3048			}
3049			atomic_inc(&iclog->ic_refcnt);
3050			xlog_state_switch_iclogs(log, iclog, 0);
3051			spin_unlock(&log->l_icloglock);
3052			if (xlog_state_release_iclog(log, iclog))
3053				return XFS_ERROR(EIO);
3054			if (log_flushed)
3055				*log_flushed = 1;
3056			spin_lock(&log->l_icloglock);
3057		}
3058
3059		if ((flags & XFS_LOG_SYNC) && /* sleep */
3060		    !(iclog->ic_state &
3061		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3062			/*
3063			 * Don't wait on completion if we know that we've
3064			 * gotten a log write error.
3065			 */
3066			if (iclog->ic_state & XLOG_STATE_IOERROR) {
3067				spin_unlock(&log->l_icloglock);
3068				return XFS_ERROR(EIO);
3069			}
3070			XFS_STATS_INC(xs_log_force_sleep);
3071			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3072			/*
3073			 * No need to grab the log lock here since we're
3074			 * only deciding whether or not to return EIO
3075			 * and the memory read should be atomic.
3076			 */
3077			if (iclog->ic_state & XLOG_STATE_IOERROR)
3078				return XFS_ERROR(EIO);
3079
3080			if (log_flushed)
3081				*log_flushed = 1;
3082		} else {		/* just return */
3083			spin_unlock(&log->l_icloglock);
3084		}
3085
3086		return 0;
3087	} while (iclog != log->l_iclog);
3088
3089	spin_unlock(&log->l_icloglock);
3090	return 0;
3091}
3092
3093/*
3094 * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
3095 * about errors or whether the log was flushed or not. This is the normal
3096 * interface to use when trying to unpin items or move the log forward.
3097 */
3098void
3099xfs_log_force_lsn(
3100	xfs_mount_t	*mp,
3101	xfs_lsn_t	lsn,
3102	uint		flags)
3103{
3104	int	error;
3105
3106	trace_xfs_log_force(mp, lsn);
3107	error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
3108	if (error)
3109		xfs_warn(mp, "%s: error %d returned.", __func__, error);
3110}
3111
3112/*
3113 * Called when we want to mark the current iclog as being ready to sync to
3114 * disk.
3115 */
3116STATIC void
3117xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3118{
3119	assert_spin_locked(&log->l_icloglock);
3120
3121	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3122		xlog_state_switch_iclogs(log, iclog, 0);
3123	} else {
3124		ASSERT(iclog->ic_state &
3125			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3126	}
3127}
3128
3129
3130/*****************************************************************************
3131 *
3132 *		TICKET functions
3133 *
3134 *****************************************************************************
3135 */
3136
3137/*
3138 * Free a used ticket when its refcount falls to zero.
3139 */
3140void
3141xfs_log_ticket_put(
3142	xlog_ticket_t	*ticket)
3143{
3144	ASSERT(atomic_read(&ticket->t_ref) > 0);
3145	if (atomic_dec_and_test(&ticket->t_ref))
3146		kmem_zone_free(xfs_log_ticket_zone, ticket);
3147}
3148
3149xlog_ticket_t *
3150xfs_log_ticket_get(
3151	xlog_ticket_t	*ticket)
3152{
3153	ASSERT(atomic_read(&ticket->t_ref) > 0);
3154	atomic_inc(&ticket->t_ref);
3155	return ticket;
3156}
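/*
 * Editorial sketch (hypothetical caller, not part of the original file):
 * the reference counting pairs up in the obvious way - take a reference
 * for as long as some other object points at the ticket, then drop it:
 */
#if 0	/* illustrative only, never compiled */
static void
example_ticket_ref(
	struct xlog_ticket	*tic)
{
	struct xlog_ticket	*ref = xfs_log_ticket_get(tic);

	/* ... hand ref to a committing transaction ... */

	xfs_log_ticket_put(ref);	/* frees the ticket on the last put */
}
#endif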
3157
3158/*
3159 * Allocate and initialise a new log ticket.
3160 */
3161xlog_ticket_t *
3162xlog_ticket_alloc(
3163	struct xlog	*log,
3164	int		unit_bytes,
3165	int		cnt,
3166	char		client,
3167	bool		permanent,
3168	xfs_km_flags_t	alloc_flags)
3169{
3170	struct xlog_ticket *tic;
3171	uint		num_headers;
3172	int		iclog_space;
3173
3174	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3175	if (!tic)
3176		return NULL;
3177
3178	/*
3179	 * Permanent reservations have up to 'cnt'-1 active log operations
3180	 * in the log.  A unit in this case is the amount of space for one
3181	 * of these log operations.  Normal reservations have a cnt of 1
3182	 * and their unit amount is the total amount of space required.
3183	 *
3184	 * The following lines of code account for non-transaction data
3185	 * which occupy space in the on-disk log.
3186	 *
3187	 * Normal form of a transaction is:
3188	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3189	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3190	 *
3191	 * We need to account for all the leadup data and trailer data
3192	 * around the transaction data.
3193	 * And then we need to account for the worst case in terms of using
3194	 * more space.
3195	 * The worst case will happen if:
3196	 * - the placement of the transaction happens to be such that the
3197	 *   roundoff is at its maximum
3198	 * - the transaction data is synced before the commit record is synced
3199	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3200	 *   Therefore the commit record is in its own Log Record.
3201	 *   This can happen as the commit record is called with its
3202	 *   own region to xlog_write().
3203	 *   This then means that in the worst case, roundoff can happen for
3204	 *   the commit-rec as well.
3205	 *   The commit-rec is smaller than padding in this scenario and so it is
3206	 *   not added separately.
3207	 */
3208
3209	/* for trans header */
3210	unit_bytes += sizeof(xlog_op_header_t);
3211	unit_bytes += sizeof(xfs_trans_header_t);
3212
3213	/* for start-rec */
3214	unit_bytes += sizeof(xlog_op_header_t);
3215
3216	/*
3217	 * for LR headers - the space for data in an iclog is the size minus
3218	 * the space used for the headers. If we use the iclog size, then we
3219	 * undercalculate the number of headers required.
3220	 *
3221	 * Furthermore - the addition of op headers for split-recs might
3222	 * increase the space required enough to require more log and op
3223	 * headers, so take that into account too.
3224	 *
3225	 * IMPORTANT: This reservation makes the assumption that if this
3226	 * transaction is the first in an iclog and hence has the LR headers
3227	 * accounted to it, then the remaining space in the iclog is
3228	 * exclusively for this transaction.  i.e. if the transaction is larger
3229	 * than the iclog, it will be the only thing in that iclog.
3230	 * Fundamentally, this means we must pass the entire log vector to
3231	 * xlog_write to guarantee this.
3232	 */
3233	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3234	num_headers = howmany(unit_bytes, iclog_space);
3235
3236	/* for split-recs - ophdrs added when data split over LRs */
3237	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3238
3239	/* add extra header reservations if we overrun */
3240	while (!num_headers ||
3241	       howmany(unit_bytes, iclog_space) > num_headers) {
3242		unit_bytes += sizeof(xlog_op_header_t);
3243		num_headers++;
3244	}
3245	unit_bytes += log->l_iclog_hsize * num_headers;
3246
3247	/* for commit-rec LR header - note: padding will subsume the ophdr */
3248	unit_bytes += log->l_iclog_hsize;
3249
3250	/* for roundoff padding for transaction data and one for commit record */
3251	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3252	    log->l_mp->m_sb.sb_logsunit > 1) {
3253		/* log su roundoff */
3254		unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3255	} else {
3256		/* BB roundoff */
3257		unit_bytes += 2*BBSIZE;
3258	}
3259
3260	atomic_set(&tic->t_ref, 1);
3261	tic->t_task		= current;
3262	INIT_LIST_HEAD(&tic->t_queue);
3263	tic->t_unit_res		= unit_bytes;
3264	tic->t_curr_res		= unit_bytes;
3265	tic->t_cnt		= cnt;
3266	tic->t_ocnt		= cnt;
3267	tic->t_tid		= random32();
3268	tic->t_clientid		= client;
3269	tic->t_flags		= XLOG_TIC_INITED;
3270	tic->t_trans_type	= 0;
3271	if (permanent)
3272		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3273
3274	xlog_tic_reset_res(tic);
3275
3276	return tic;
3277}
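/*
 * Worked example (editorial sketch, hypothetical numbers): with 32KB
 * iclogs and a 512 byte header, iclog_space = 32256 bytes.  A reservation
 * that has grown to ~100KB initially needs howmany(102400, 32256) = 4 LR
 * headers; the op header and LR header bytes added above can push
 * unit_bytes across another iclog boundary, which is exactly what the
 * while loop re-checks before the final stripe-unit roundoff is added.
 */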
3278
3279
3280/******************************************************************************
3281 *
3282 *		Log debug routines
3283 *
3284 ******************************************************************************
3285 */
3286#if defined(DEBUG)
3287/*
3288 * Make sure that the destination ptr is within the valid data region of
3289 * one of the iclogs.  This uses backup pointers stored in a different
3290 * part of the log in case we trash the log structure.
3291 */
3292void
3293xlog_verify_dest_ptr(
3294	struct xlog	*log,
3295	char		*ptr)
3296{
3297	int i;
3298	int good_ptr = 0;
3299
3300	for (i = 0; i < log->l_iclog_bufs; i++) {
3301		if (ptr >= log->l_iclog_bak[i] &&
3302		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3303			good_ptr++;
3304	}
3305
3306	if (!good_ptr)
3307		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3308}
3309
3310/*
3311 * Check to make sure the grant write head didn't just overlap the tail.  If
3312 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3313 * the cycles differ by exactly one and check the byte count.
3314 *
3315 * This check is run unlocked, so can give false positives. Rather than assert
3316 * on failures, use a warn-once flag and a panic tag to allow the admin to
3317 * determine if they want to panic the machine when such an error occurs. For
3318 * debug kernels this will have the same effect as using an assert but, unlike
3319 * an assert, it can be turned off at runtime.
3320 */
3321STATIC void
3322xlog_verify_grant_tail(
3323	struct xlog	*log)
3324{
3325	int		tail_cycle, tail_blocks;
3326	int		cycle, space;
3327
3328	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3329	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3330	if (tail_cycle != cycle) {
3331		if (cycle - 1 != tail_cycle &&
3332		    !(log->l_flags & XLOG_TAIL_WARN)) {
3333			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3334				"%s: cycle - 1 != tail_cycle", __func__);
3335			log->l_flags |= XLOG_TAIL_WARN;
3336		}
3337
3338		if (space > BBTOB(tail_blocks) &&
3339		    !(log->l_flags & XLOG_TAIL_WARN)) {
3340			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3341				"%s: space > BBTOB(tail_blocks)", __func__);
3342			log->l_flags |= XLOG_TAIL_WARN;
3343		}
3344	}
3345}
3346
3347/* check if it will fit */
3348STATIC void
3349xlog_verify_tail_lsn(xlog_t	    *log,
3350		     xlog_in_core_t *iclog,
3351		     xfs_lsn_t	    tail_lsn)
3352{
3353    int blocks;
3354
3355    if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3356	blocks =
3357	    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3358	if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3359		xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3360    } else {
3361	ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3362
3363	if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3364		xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3365
3366	blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3367	if (blocks < BTOBB(iclog->ic_offset) + 1)
3368		xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3369    }
3370}	/* xlog_verify_tail_lsn */
3371
3372/*
3373 * Perform a number of checks on the iclog before writing to disk.
3374 *
3375 * 1. Make sure the iclogs are still circular
3376 * 2. Make sure we have a good magic number
3377 * 3. Make sure we don't have magic numbers in the data
3378 * 4. Check fields of each log operation header for:
3379 *	A. Valid client identifier
3380 *	B. tid ptr value falls in valid ptr space (user space code)
3381 *	C. Length in log record header is correct according to the
3382 *		individual operation headers within record.
3383 * 5. When a bwrite will occur within 5 blocks of the front of the physical
3384 *	log, check the preceding blocks of the physical log to make sure all
3385 *	the cycle numbers agree with the current cycle number.
3386 */
3387STATIC void
3388xlog_verify_iclog(xlog_t	 *log,
3389		  xlog_in_core_t *iclog,
3390		  int		 count,
3391		  boolean_t	 syncing)
3392{
3393	xlog_op_header_t	*ophead;
3394	xlog_in_core_t		*icptr;
3395	xlog_in_core_2_t	*xhdr;
3396	xfs_caddr_t		ptr;
3397	xfs_caddr_t		base_ptr;
3398	__psint_t		field_offset;
3399	__uint8_t		clientid;
3400	int			len, i, j, k, op_len;
3401	int			idx;
3402
3403	/* check validity of iclog pointers */
3404	spin_lock(&log->l_icloglock);
3405	icptr = log->l_iclog;
3406	for (i=0; i < log->l_iclog_bufs; i++) {
3407		if (icptr == NULL)
3408			xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3409		icptr = icptr->ic_next;
3410	}
3411	if (icptr != log->l_iclog)
3412		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3413	spin_unlock(&log->l_icloglock);
3414
3415	/* check log magic numbers */
3416	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3417		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3418
3419	ptr = (xfs_caddr_t) &iclog->ic_header;
3420	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
3421	     ptr += BBSIZE) {
3422		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3423			xfs_emerg(log->l_mp, "%s: unexpected magic num",
3424				__func__);
3425	}
3426
3427	/* check fields */
3428	len = be32_to_cpu(iclog->ic_header.h_num_logops);
3429	ptr = iclog->ic_datap;
3430	base_ptr = ptr;
3431	ophead = (xlog_op_header_t *)ptr;
3432	xhdr = iclog->ic_data;
3433	for (i = 0; i < len; i++) {
3434		ophead = (xlog_op_header_t *)ptr;
3435
3436		/* clientid is only 1 byte */
3437		field_offset = (__psint_t)
3438			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
3439		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3440			clientid = ophead->oh_clientid;
3441		} else {
3442			idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
3443			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3444				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3445				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3446				clientid = xlog_get_client_id(
3447					xhdr[j].hic_xheader.xh_cycle_data[k]);
3448			} else {
3449				clientid = xlog_get_client_id(
3450					iclog->ic_header.h_cycle_data[idx]);
3451			}
3452		}
3453		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3454			xfs_warn(log->l_mp,
3455				"%s: invalid clientid %d op 0x%p offset 0x%lx",
3456				__func__, clientid, ophead,
3457				(unsigned long)field_offset);
3458
3459		/* check length */
3460		field_offset = (__psint_t)
3461			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
3462		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3463			op_len = be32_to_cpu(ophead->oh_len);
3464		} else {
3465			idx = BTOBBT((__psint_t)&ophead->oh_len -
3466				    (__psint_t)iclog->ic_datap);
3467			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3468				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3469				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3470				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3471			} else {
3472				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3473			}
3474		}
3475		ptr += sizeof(xlog_op_header_t) + op_len;
3476	}
3477}	/* xlog_verify_iclog */
3478#endif
3479
3480/*
3481 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
3482 */
3483STATIC int
3484xlog_state_ioerror(
3485	xlog_t	*log)
3486{
3487	xlog_in_core_t	*iclog, *ic;
3488
3489	iclog = log->l_iclog;
3490	if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
3491		/*
3492		 * Mark all the incore logs IOERROR.
3493		 * From now on, no log flushes will result.
3494		 */
3495		ic = iclog;
3496		do {
3497			ic->ic_state = XLOG_STATE_IOERROR;
3498			ic = ic->ic_next;
3499		} while (ic != iclog);
3500		return 0;
3501	}
3502	/*
3503	 * Return non-zero, if state transition has already happened.
3504	 */
3505	return 1;
3506}
3507
3508/*
3509 * This is called from xfs_force_shutdown, when we're forcibly
3510 * shutting down the filesystem, typically because of an IO error.
3511 * Our main objectives here are to make sure that:
3512 *	a. the filesystem gets marked 'SHUTDOWN' for all interested
3513 *	   parties to find out, 'atomically'.
3514 *	b. those who're sleeping on log reservations, pinned objects and
3515 *	    other resources get woken up, and be told the bad news.
3516 *	c. nothing new gets queued up after (a) and (b) are done.
3517 *	d. if !logerror, flush the iclogs to disk, then seal them off
3518 *	   for business.
3519 *
3520 * Note: for delayed logging the !logerror case needs to flush the regions
3521 * held in memory out to the iclogs before flushing them to disk. This needs
3522 * to be done before the log is marked as shutdown, otherwise the flush to the
3523 * iclogs will fail.
3524 */
3525int
3526xfs_log_force_umount(
3527	struct xfs_mount	*mp,
3528	int			logerror)
3529{
3530	xlog_t		*log;
3531	int		retval;
3532
3533	log = mp->m_log;
3534
3535	/*
3536	 * If this happens during log recovery, don't worry about
3537	 * locking; the log isn't open for business yet.
3538	 */
3539	if (!log ||
3540	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
3541		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3542		if (mp->m_sb_bp)
3543			XFS_BUF_DONE(mp->m_sb_bp);
3544		return 0;
3545	}
3546
3547	/*
3548	 * Somebody could've already done the hard work for us.
3549	 * No need to get locks for this.
3550	 */
3551	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3552		ASSERT(XLOG_FORCED_SHUTDOWN(log));
3553		return 1;
3554	}
3555	retval = 0;
3556
3557	/*
3558	 * Flush the in memory commit item list before marking the log as
3559	 * being shut down. We need to do it in this order to ensure all the
3560	 * completed transactions are flushed to disk with the xfs_log_force()
3561	 * call below.
3562	 */
3563	if (!logerror)
3564		xlog_cil_force(log);
3565
3566	/*
3567	 * mark the filesystem and the log as being in a shutdown state and wake
3568	 * everybody up to tell them the bad news.
3569	 */
3570	spin_lock(&log->l_icloglock);
3571	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3572	if (mp->m_sb_bp)
3573		XFS_BUF_DONE(mp->m_sb_bp);
3574
3575	/*
3576	 * This flag is sort of redundant because of the mount flag, but
3577	 * it's good to maintain the separation between the log and the rest
3578	 * of XFS.
3579	 */
3580	log->l_flags |= XLOG_IO_ERROR;
3581
3582	/*
3583	 * If we hit a log error, we want to mark all the iclogs IOERROR
3584	 * while we're still holding the icloglock.
3585	 */
3586	if (logerror)
3587		retval = xlog_state_ioerror(log);
3588	spin_unlock(&log->l_icloglock);
3589
3590	/*
3591	 * We don't want anybody waiting for log reservations after this. That
3592	 * means we have to wake up everybody queued up on reserveq as well as
3593	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
3594	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3595	 * action is protected by the grant locks.
3596	 */
3597	xlog_grant_head_wake_all(&log->l_reserve_head);
3598	xlog_grant_head_wake_all(&log->l_write_head);
3599
3600	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
3601		ASSERT(!logerror);
3602		/*
3603		 * Force the incore logs to disk before shutting the
3604		 * log down completely.
3605		 */
3606		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);
3607
3608		spin_lock(&log->l_icloglock);
3609		retval = xlog_state_ioerror(log);
3610		spin_unlock(&log->l_icloglock);
3611	}
3612	/*
3613	 * Wake up everybody waiting on xfs_log_force.
3614	 * Callback all log item committed functions as if the
3615	 * log writes were completed.
3616	 */
3617	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
3618
3619#ifdef XFSERRORDEBUG
3620	{
3621		xlog_in_core_t	*iclog;
3622
3623		spin_lock(&log->l_icloglock);
3624		iclog = log->l_iclog;
3625		do {
3626			ASSERT(iclog->ic_callback == 0);
3627			iclog = iclog->ic_next;
3628		} while (iclog != log->l_iclog);
3629		spin_unlock(&log->l_icloglock);
3630	}
3631#endif
3632	/* return non-zero if log IOERROR transition had already happened */
3633	return retval;
3634}
3635
3636STATIC int
3637xlog_iclogs_empty(xlog_t *log)
3638{
3639	xlog_in_core_t	*iclog;
3640
3641	iclog = log->l_iclog;
3642	do {
3643		/* endianness does not matter here, zero is zero in
3644		 * any language.
3645		 */
3646		if (iclog->ic_header.h_num_logops)
3647			return 0;
3648		iclog = iclog->ic_next;
3649	} while (iclog != log->l_iclog);
3650	return 1;
3651}
v3.1
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_inum.h"
  24#include "xfs_trans.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_mount.h"
  28#include "xfs_error.h"
  29#include "xfs_log_priv.h"
  30#include "xfs_buf_item.h"
  31#include "xfs_bmap_btree.h"
  32#include "xfs_alloc_btree.h"
  33#include "xfs_ialloc_btree.h"
  34#include "xfs_log_recover.h"
  35#include "xfs_trans_priv.h"
  36#include "xfs_dinode.h"
  37#include "xfs_inode.h"
  38#include "xfs_rw.h"
  39#include "xfs_trace.h"
  40
  41kmem_zone_t	*xfs_log_ticket_zone;
  42
  43/* Local miscellaneous function prototypes */
  44STATIC int	 xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
  45				    xlog_in_core_t **, xfs_lsn_t *);
  46STATIC xlog_t *  xlog_alloc_log(xfs_mount_t	*mp,
  47				xfs_buftarg_t	*log_target,
  48				xfs_daddr_t	blk_offset,
  49				int		num_bblks);
  50STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
  51STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
  52STATIC void	 xlog_dealloc_log(xlog_t *log);
  53
  54/* local state machine functions */
  55STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
  56STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
  57STATIC int  xlog_state_get_iclog_space(xlog_t		*log,
  58				       int		len,
  59				       xlog_in_core_t	**iclog,
  60				       xlog_ticket_t	*ticket,
  61				       int		*continued_write,
  62				       int		*logoffsetp);
  63STATIC int  xlog_state_release_iclog(xlog_t		*log,
  64				     xlog_in_core_t	*iclog);
  65STATIC void xlog_state_switch_iclogs(xlog_t		*log,
  66				     xlog_in_core_t *iclog,
  67				     int		eventual_size);
  68STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
  69
  70/* local functions to manipulate grant head */
  71STATIC int  xlog_grant_log_space(xlog_t		*log,
  72				 xlog_ticket_t	*xtic);
  73STATIC void xlog_grant_push_ail(struct log	*log,
  74				int		need_bytes);
  75STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
  76					   xlog_ticket_t *ticket);
  77STATIC int xlog_regrant_write_log_space(xlog_t		*log,
  78					 xlog_ticket_t  *ticket);
  79STATIC void xlog_ungrant_log_space(xlog_t	 *log,
  80				   xlog_ticket_t *ticket);
  81
  82#if defined(DEBUG)
  83STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
  84STATIC void	xlog_verify_grant_tail(struct log *log);
  85STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
  86				  int count, boolean_t syncing);
  87STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
  88				     xfs_lsn_t tail_lsn);
  89#else
  90#define xlog_verify_dest_ptr(a,b)
  91#define xlog_verify_grant_tail(a)
  92#define xlog_verify_iclog(a,b,c,d)
  93#define xlog_verify_tail_lsn(a,b,c)
  94#endif
  95
  96STATIC int	xlog_iclogs_empty(xlog_t *log);
  97
  98static void
  99xlog_grant_sub_space(
 100	struct log	*log,
 101	atomic64_t	*head,
 102	int		bytes)
 103{
 104	int64_t	head_val = atomic64_read(head);
 105	int64_t new, old;
 106
 107	do {
 108		int	cycle, space;
 109
 110		xlog_crack_grant_head_val(head_val, &cycle, &space);
 111
 112		space -= bytes;
 113		if (space < 0) {
 114			space += log->l_logsize;
 115			cycle--;
 116		}
 117
 118		old = head_val;
 119		new = xlog_assign_grant_head_val(cycle, space);
 120		head_val = atomic64_cmpxchg(head, old, new);
 121	} while (head_val != old);
 122}
 123
 124static void
 125xlog_grant_add_space(
 126	struct log	*log,
 127	atomic64_t	*head,
 128	int		bytes)
 129{
 130	int64_t	head_val = atomic64_read(head);
 131	int64_t new, old;
 132
 133	do {
 134		int		tmp;
 135		int		cycle, space;
 136
 137		xlog_crack_grant_head_val(head_val, &cycle, &space);
 138
 139		tmp = log->l_logsize - space;
 140		if (tmp > bytes)
 141			space += bytes;
 142		else {
 143			space = bytes - tmp;
 144			cycle++;
 145		}
 146
 147		old = head_val;
 148		new = xlog_assign_grant_head_val(cycle, space);
 149		head_val = atomic64_cmpxchg(head, old, new);
 150	} while (head_val != old);
 151}
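/*
 * A worked example of the wrap handling above (illustrative numbers only):
 * suppose l_logsize is 1000 bytes and the head is packed as (cycle = 5,
 * space = 900).  Adding 300 bytes gives tmp = 1000 - 900 = 100, which is
 * not greater than bytes, so space becomes 300 - 100 = 200 and the cycle
 * advances to 6.  The cmpxchg loop then publishes (6, 200) atomically; if
 * another CPU updated the head in the meantime, the calculation is simply
 * retried against the new value.  xlog_grant_sub_space() is the mirror
 * image, borrowing a full log size from the previous cycle when space
 * goes negative.
 */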
 152
 153static void
 154xlog_tic_reset_res(xlog_ticket_t *tic)
 155{
 156	tic->t_res_num = 0;
 157	tic->t_res_arr_sum = 0;
 158	tic->t_res_num_ophdrs = 0;
 159}
 160
 161static void
 162xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
 163{
 164	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
 165		/* add to overflow and start again */
 166		tic->t_res_o_flow += tic->t_res_arr_sum;
 167		tic->t_res_num = 0;
 168		tic->t_res_arr_sum = 0;
 169	}
 170
 171	tic->t_res_arr[tic->t_res_num].r_len = len;
 172	tic->t_res_arr[tic->t_res_num].r_type = type;
 173	tic->t_res_arr_sum += len;
 174	tic->t_res_num++;
 175}
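/*
 * Illustrative example of the overflow handling above: once all
 * XLOG_TIC_LEN_MAX slots are in use, the next call folds the accumulated
 * t_res_arr_sum into t_res_o_flow and starts filling the array again from
 * slot 0.  Per-region detail is lost for the folded entries, but the byte
 * totals reported by xlog_print_tic_res() stay correct because it prints
 * both t_res_arr_sum and t_res_o_flow.
 */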
 176
 177/*
 178 * NOTES:
 179 *
180 *	1. currblock field gets updated at startup and after in-core logs
181 *		are marked with WANT_SYNC.
 182 */
 183
 184/*
 185 * This routine is called when a user of a log manager ticket is done with
 186 * the reservation.  If the ticket was ever used, then a commit record for
 187 * the associated transaction is written out as a log operation header with
 188 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 189 * a given ticket.  If the ticket was one with a permanent reservation, then
 190 * a few operations are done differently.  Permanent reservation tickets by
 191 * default don't release the reservation.  They just commit the current
 192 * transaction with the belief that the reservation is still needed.  A flag
 193 * must be passed in before permanent reservations are actually released.
194 * When these types of tickets are not released, they need to be set into
 195 * the inited state again.  By doing this, a start record will be written
 196 * out when the next write occurs.
 197 */
 198xfs_lsn_t
 199xfs_log_done(
 200	struct xfs_mount	*mp,
 201	struct xlog_ticket	*ticket,
 202	struct xlog_in_core	**iclog,
 203	uint			flags)
 204{
 205	struct log		*log = mp->m_log;
 206	xfs_lsn_t		lsn = 0;
 207
 208	if (XLOG_FORCED_SHUTDOWN(log) ||
 209	    /*
 210	     * If nothing was ever written, don't write out commit record.
 211	     * If we get an error, just continue and give back the log ticket.
 212	     */
 213	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
 214	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
 215		lsn = (xfs_lsn_t) -1;
 216		if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
 217			flags |= XFS_LOG_REL_PERM_RESERV;
 218		}
 219	}
 220
 221
 222	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
 223	    (flags & XFS_LOG_REL_PERM_RESERV)) {
 224		trace_xfs_log_done_nonperm(log, ticket);
 225
 226		/*
 227		 * Release ticket if not permanent reservation or a specific
 228		 * request has been made to release a permanent reservation.
 229		 */
 230		xlog_ungrant_log_space(log, ticket);
 231		xfs_log_ticket_put(ticket);
 232	} else {
 233		trace_xfs_log_done_perm(log, ticket);
 234
 235		xlog_regrant_reserve_log_space(log, ticket);
 236		/* If this ticket was a permanent reservation and we aren't
 237		 * trying to release it, reset the inited flags; so next time
 238		 * we write, a start record will be written out.
 239		 */
 240		ticket->t_flags |= XLOG_TIC_INITED;
 241	}
 242
 243	return lsn;
 244}
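/*
 * Hypothetical caller sketch (not from this file): a one-shot ticket is
 * committed and torn down by a single xfs_log_done() call, while a
 * rolling, permanent ticket is regranted and re-INITED so that the next
 * transaction on it emits a fresh start record:
 *
 *	lsn = xfs_log_done(mp, tic, &iclog, 0);
 *		(permanent reservation kept, ticket reusable)
 *	lsn = xfs_log_done(mp, tic, &iclog, XFS_LOG_REL_PERM_RESERV);
 *		(reservation released, ticket reference dropped)
 */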
 245
 246/*
 247 * Attaches a new iclog I/O completion callback routine during
 248 * transaction commit.  If the log is in error state, a non-zero
 249 * return code is handed back and the caller is responsible for
 250 * executing the callback at an appropriate time.
 251 */
 252int
 253xfs_log_notify(
 254	struct xfs_mount	*mp,
 255	struct xlog_in_core	*iclog,
 256	xfs_log_callback_t	*cb)
 257{
 258	int	abortflg;
 259
 260	spin_lock(&iclog->ic_callback_lock);
 261	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
 262	if (!abortflg) {
 263		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
 264			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
 265		cb->cb_next = NULL;
 266		*(iclog->ic_callback_tail) = cb;
 267		iclog->ic_callback_tail = &(cb->cb_next);
 268	}
 269	spin_unlock(&iclog->ic_callback_lock);
 270	return abortflg;
 271}
 272
 273int
 274xfs_log_release_iclog(
 275	struct xfs_mount	*mp,
 276	struct xlog_in_core	*iclog)
 277{
 278	if (xlog_state_release_iclog(mp->m_log, iclog)) {
 279		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 280		return EIO;
 281	}
 282
 283	return 0;
 284}
 285
 286/*
 287 *  1. Reserve an amount of on-disk log space and return a ticket corresponding
 288 *	to the reservation.
 289 *  2. Potentially, push buffers at tail of log to disk.
 290 *
 291 * Each reservation is going to reserve extra space for a log record header.
 292 * When writes happen to the on-disk log, we don't subtract the length of the
 293 * log record header from any reservation.  By wasting space in each
 294 * reservation, we prevent over allocation problems.
 295 */
 296int
 297xfs_log_reserve(
 298	struct xfs_mount	*mp,
 299	int		 	unit_bytes,
 300	int		 	cnt,
 301	struct xlog_ticket	**ticket,
 302	__uint8_t	 	client,
 303	uint		 	flags,
 304	uint		 	t_type)
 305{
 306	struct log		*log = mp->m_log;
 307	struct xlog_ticket	*internal_ticket;
 308	int			retval = 0;
 309
 310	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
 311
 312	if (XLOG_FORCED_SHUTDOWN(log))
 313		return XFS_ERROR(EIO);
 314
 315	XFS_STATS_INC(xs_try_logspace);
 316
 317
 318	if (*ticket != NULL) {
 319		ASSERT(flags & XFS_LOG_PERM_RESERV);
 320		internal_ticket = *ticket;
 321
 322		/*
 323		 * this is a new transaction on the ticket, so we need to
 324		 * change the transaction ID so that the next transaction has a
 325		 * different TID in the log. Just add one to the existing tid
 326		 * so that we can see chains of rolling transactions in the log
 327		 * easily.
 328		 */
 329		internal_ticket->t_tid++;
 330
 331		trace_xfs_log_reserve(log, internal_ticket);
 332
 333		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
 334		retval = xlog_regrant_write_log_space(log, internal_ticket);
 335	} else {
 336		/* may sleep if need to allocate more tickets */
 337		internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt,
 338						  client, flags,
 339						  KM_SLEEP|KM_MAYFAIL);
 340		if (!internal_ticket)
 341			return XFS_ERROR(ENOMEM);
 342		internal_ticket->t_trans_type = t_type;
 343		*ticket = internal_ticket;
 344
 345		trace_xfs_log_reserve(log, internal_ticket);
 346
 347		xlog_grant_push_ail(log,
 348				    (internal_ticket->t_unit_res *
 349				     internal_ticket->t_cnt));
 350		retval = xlog_grant_log_space(log, internal_ticket);
 351	}
 352
 353	return retval;
 354}	/* xfs_log_reserve */
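/*
 * Typical usage sketch (hypothetical values, for illustration only): the
 * first call passes *ticket == NULL and allocates a fresh ticket; a
 * transaction roll passes the same ticket back with XFS_LOG_PERM_RESERV
 * so only the write head is regranted:
 *
 *	struct xlog_ticket *tic = NULL;
 *	error = xfs_log_reserve(mp, unit_bytes, 2, &tic, XFS_TRANSACTION,
 *				XFS_LOG_PERM_RESERV, XFS_TRANS_DUMMY1);
 *	...
 *	error = xfs_log_reserve(mp, unit_bytes, 0, &tic, XFS_TRANSACTION,
 *				XFS_LOG_PERM_RESERV, XFS_TRANS_DUMMY1);
 */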
 355
 356
 357/*
 358 * Mount a log filesystem
 359 *
 360 * mp		- ubiquitous xfs mount point structure
 361 * log_target	- buftarg of on-disk log device
 362 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 363 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 364 *
 365 * Return error or zero.
 366 */
 367int
 368xfs_log_mount(
 369	xfs_mount_t	*mp,
 370	xfs_buftarg_t	*log_target,
 371	xfs_daddr_t	blk_offset,
 372	int		num_bblks)
 373{
 374	int		error;
 375
 376	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 377		xfs_notice(mp, "Mounting Filesystem");
 378	else {
 379		xfs_notice(mp,
 380"Mounting filesystem in no-recovery mode.  Filesystem will be inconsistent.");
 381		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 382	}
 383
 384	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
 385	if (IS_ERR(mp->m_log)) {
 386		error = -PTR_ERR(mp->m_log);
 387		goto out;
 388	}
 389
 390	/*
 391	 * Initialize the AIL now we have a log.
 392	 */
 393	error = xfs_trans_ail_init(mp);
 394	if (error) {
 395		xfs_warn(mp, "AIL initialisation failed: error %d", error);
 396		goto out_free_log;
 397	}
 398	mp->m_log->l_ailp = mp->m_ail;
 399
 400	/*
 401	 * skip log recovery on a norecovery mount.  pretend it all
 402	 * just worked.
 403	 */
 404	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 405		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 406
 407		if (readonly)
 408			mp->m_flags &= ~XFS_MOUNT_RDONLY;
 409
 410		error = xlog_recover(mp->m_log);
 411
 412		if (readonly)
 413			mp->m_flags |= XFS_MOUNT_RDONLY;
 414		if (error) {
 415			xfs_warn(mp, "log mount/recovery failed: error %d",
 416				error);
 417			goto out_destroy_ail;
 418		}
 419	}
 420
 421	/* Normal transactions can now occur */
 422	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
 423
 424	/*
425	 * Now the log has been fully initialised and we know where our
 426	 * space grant counters are, we can initialise the permanent ticket
 427	 * needed for delayed logging to work.
 428	 */
 429	xlog_cil_init_post_recovery(mp->m_log);
 430
 431	return 0;
 432
 433out_destroy_ail:
 434	xfs_trans_ail_destroy(mp);
 435out_free_log:
 436	xlog_dealloc_log(mp->m_log);
 437out:
 438	return error;
 439}
 440
 441/*
 442 * Finish the recovery of the file system.  This is separate from
 443 * the xfs_log_mount() call, because it depends on the code in
 444 * xfs_mountfs() to read in the root and real-time bitmap inodes
 445 * between calling xfs_log_mount() and here.
 446 *
 447 * mp		- ubiquitous xfs mount point structure
 448 */
 449int
 450xfs_log_mount_finish(xfs_mount_t *mp)
 451{
 452	int	error;
 453
 454	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 455		error = xlog_recover_finish(mp->m_log);
 456	else {
 457		error = 0;
 458		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 459	}
 460
 461	return error;
 462}
 463
 464/*
 465 * Final log writes as part of unmount.
 466 *
 467 * Mark the filesystem clean as unmount happens.  Note that during relocation
 468 * this routine needs to be executed as part of source-bag while the
 469 * deallocation must not be done until source-end.
 470 */
 471
 472/*
 473 * Unmount record used to have a string "Unmount filesystem--" in the
 474 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 475 * We just write the magic number now since that particular field isn't
476 * currently architecture converted and "nUmount" is a bit of an oddity.
 477 * As far as I know, there weren't any dependencies on the old behaviour.
 478 */
 479
 480int
 481xfs_log_unmount_write(xfs_mount_t *mp)
 482{
 483	xlog_t		 *log = mp->m_log;
 484	xlog_in_core_t	 *iclog;
 485#ifdef DEBUG
 486	xlog_in_core_t	 *first_iclog;
 487#endif
 488	xlog_ticket_t	*tic = NULL;
 489	xfs_lsn_t	 lsn;
 490	int		 error;
 491
 492	/*
 493	 * Don't write out unmount record on read-only mounts.
 494	 * Or, if we are doing a forced umount (typically because of IO errors).
 495	 */
 496	if (mp->m_flags & XFS_MOUNT_RDONLY)
 497		return 0;
 498
 499	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
 500	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 501
 502#ifdef DEBUG
 503	first_iclog = iclog = log->l_iclog;
 504	do {
 505		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
 506			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
 507			ASSERT(iclog->ic_offset == 0);
 508		}
 509		iclog = iclog->ic_next;
 510	} while (iclog != first_iclog);
 511#endif
 512	if (! (XLOG_FORCED_SHUTDOWN(log))) {
 513		error = xfs_log_reserve(mp, 600, 1, &tic,
 514					XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
 515		if (!error) {
 516			/* the data section must be 32 bit size aligned */
 517			struct {
 518			    __uint16_t magic;
 519			    __uint16_t pad1;
 520			    __uint32_t pad2; /* may as well make it 64 bits */
 521			} magic = {
 522				.magic = XLOG_UNMOUNT_TYPE,
 523			};
 524			struct xfs_log_iovec reg = {
 525				.i_addr = &magic,
 526				.i_len = sizeof(magic),
 527				.i_type = XLOG_REG_TYPE_UNMOUNT,
 528			};
 529			struct xfs_log_vec vec = {
 530				.lv_niovecs = 1,
 531				.lv_iovecp = &reg,
 532			};
 533
 534			/* remove inited flag */
 535			tic->t_flags = 0;
 536			error = xlog_write(log, &vec, tic, &lsn,
 537					   NULL, XLOG_UNMOUNT_TRANS);
 538			/*
 539			 * At this point, we're umounting anyway,
 540			 * so there's no point in transitioning log state
 541			 * to IOERROR. Just continue...
 542			 */
 543		}
 544
 545		if (error)
 546			xfs_alert(mp, "%s: unmount record failed", __func__);
 547
 548
 549		spin_lock(&log->l_icloglock);
 550		iclog = log->l_iclog;
 551		atomic_inc(&iclog->ic_refcnt);
 552		xlog_state_want_sync(log, iclog);
 553		spin_unlock(&log->l_icloglock);
 554		error = xlog_state_release_iclog(log, iclog);
 555
 556		spin_lock(&log->l_icloglock);
 557		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 558		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 559			if (!XLOG_FORCED_SHUTDOWN(log)) {
 560				xlog_wait(&iclog->ic_force_wait,
 561							&log->l_icloglock);
 562			} else {
 563				spin_unlock(&log->l_icloglock);
 564			}
 565		} else {
 566			spin_unlock(&log->l_icloglock);
 567		}
 568		if (tic) {
 569			trace_xfs_log_umount_write(log, tic);
 570			xlog_ungrant_log_space(log, tic);
 571			xfs_log_ticket_put(tic);
 572		}
 573	} else {
 574		/*
 575		 * We're already in forced_shutdown mode, couldn't
 576		 * even attempt to write out the unmount transaction.
 577		 *
 578		 * Go through the motions of sync'ing and releasing
 579		 * the iclog, even though no I/O will actually happen,
 580		 * we need to wait for other log I/Os that may already
 581		 * be in progress.  Do this as a separate section of
 582		 * code so we'll know if we ever get stuck here that
 583		 * we're in this odd situation of trying to unmount
 584		 * a file system that went into forced_shutdown as
585	 * the result of an unmount.
 586		 */
 587		spin_lock(&log->l_icloglock);
 588		iclog = log->l_iclog;
 589		atomic_inc(&iclog->ic_refcnt);
 590
 591		xlog_state_want_sync(log, iclog);
 592		spin_unlock(&log->l_icloglock);
 593		error =  xlog_state_release_iclog(log, iclog);
 594
 595		spin_lock(&log->l_icloglock);
 596
 597		if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
 598			|| iclog->ic_state == XLOG_STATE_DIRTY
 599			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 600
 601				xlog_wait(&iclog->ic_force_wait,
 602							&log->l_icloglock);
 603		} else {
 604			spin_unlock(&log->l_icloglock);
 605		}
 606	}
 607
 608	return error;
 609}	/* xfs_log_unmount_write */
 610
 611/*
 612 * Deallocate log structures for unmount/relocation.
 613 *
 614 * We need to stop the aild from running before we destroy
 615 * and deallocate the log as the aild references the log.
 616 */
 617void
 618xfs_log_unmount(xfs_mount_t *mp)
 619{
 620	xfs_trans_ail_destroy(mp);
 621	xlog_dealloc_log(mp->m_log);
 622}
 623
 624void
 625xfs_log_item_init(
 626	struct xfs_mount	*mp,
 627	struct xfs_log_item	*item,
 628	int			type,
 629	struct xfs_item_ops	*ops)
 630{
 631	item->li_mountp = mp;
 632	item->li_ailp = mp->m_ail;
 633	item->li_type = type;
 634	item->li_ops = ops;
 635	item->li_lv = NULL;
 636
 637	INIT_LIST_HEAD(&item->li_ail);
 638	INIT_LIST_HEAD(&item->li_cil);
 639}
 640
 641/*
 642 * Write region vectors to log.  The write happens using the space reservation
 643 * of the ticket (tic).  It is not a requirement that all writes for a given
 644 * transaction occur with one call to xfs_log_write(). However, it is important
 645 * to note that the transaction reservation code makes an assumption about the
 646 * number of log headers a transaction requires that may be violated if you
647 * don't pass all the transaction vectors in one call.
 648 */
 649int
 650xfs_log_write(
 651	struct xfs_mount	*mp,
 652	struct xfs_log_iovec	reg[],
 653	int			nentries,
 654	struct xlog_ticket	*tic,
 655	xfs_lsn_t		*start_lsn)
 656{
 657	struct log		*log = mp->m_log;
 658	int			error;
 659	struct xfs_log_vec	vec = {
 660		.lv_niovecs = nentries,
 661		.lv_iovecp = reg,
 662	};
 663
 664	if (XLOG_FORCED_SHUTDOWN(log))
 665		return XFS_ERROR(EIO);
 666
 667	error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
 668	if (error)
 669		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 670	return error;
 671}
 672
 673void
 674xfs_log_move_tail(xfs_mount_t	*mp,
 675		  xfs_lsn_t	tail_lsn)
 676{
 677	xlog_ticket_t	*tic;
 678	xlog_t		*log = mp->m_log;
 679	int		need_bytes, free_bytes;
 680
 681	if (XLOG_FORCED_SHUTDOWN(log))
 682		return;
 683
 684	if (tail_lsn == 0)
 685		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 686
 687	/* tail_lsn == 1 implies that we weren't passed a valid value.  */
 688	if (tail_lsn != 1)
 689		atomic64_set(&log->l_tail_lsn, tail_lsn);
 690
 691	if (!list_empty_careful(&log->l_writeq)) {
 692#ifdef DEBUG
 693		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 694			panic("Recovery problem");
 695#endif
 696		spin_lock(&log->l_grant_write_lock);
 697		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 698		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 699			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 700
 701			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
 702				break;
 703			tail_lsn = 0;
 704			free_bytes -= tic->t_unit_res;
 705			trace_xfs_log_regrant_write_wake_up(log, tic);
 706			wake_up(&tic->t_wait);
 707		}
 708		spin_unlock(&log->l_grant_write_lock);
 709	}
 710
 711	if (!list_empty_careful(&log->l_reserveq)) {
 712#ifdef DEBUG
 713		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 714			panic("Recovery problem");
 715#endif
 716		spin_lock(&log->l_grant_reserve_lock);
 717		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 718		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 719			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 720				need_bytes = tic->t_unit_res*tic->t_cnt;
 721			else
 722				need_bytes = tic->t_unit_res;
 723			if (free_bytes < need_bytes && tail_lsn != 1)
 724				break;
 725			tail_lsn = 0;
 726			free_bytes -= need_bytes;
 727			trace_xfs_log_grant_wake_up(log, tic);
 728			wake_up(&tic->t_wait);
 729		}
 730		spin_unlock(&log->l_grant_reserve_lock);
 731	}
 732}
 733
 734/*
 735 * Determine if we have a transaction that has gone to disk
 736 * that needs to be covered. To begin the transition to the idle state
 737 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
 738 * If we are then in a state where covering is needed, the caller is informed
 739 * that dummy transactions are required to move the log into the idle state.
 740 *
 741 * Because this is called as part of the sync process, we should also indicate
 742 * that dummy transactions should be issued in anything but the covered or
 743 * idle states. This ensures that the log tail is accurately reflected in
744 * the log at the end of the sync; hence if a crash occurs, we avoid
745 * replaying transactions whose metadata is already on disk.
 746 */
 747int
 748xfs_log_need_covered(xfs_mount_t *mp)
 749{
 750	int		needed = 0;
 751	xlog_t		*log = mp->m_log;
 752
 753	if (!xfs_fs_writable(mp))
 754		return 0;
 755
 756	spin_lock(&log->l_icloglock);
 757	switch (log->l_covered_state) {
 758	case XLOG_STATE_COVER_DONE:
 759	case XLOG_STATE_COVER_DONE2:
 760	case XLOG_STATE_COVER_IDLE:
 761		break;
 762	case XLOG_STATE_COVER_NEED:
 763	case XLOG_STATE_COVER_NEED2:
 764		if (!xfs_ail_min_lsn(log->l_ailp) &&
 765		    xlog_iclogs_empty(log)) {
 766			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
 767				log->l_covered_state = XLOG_STATE_COVER_DONE;
 768			else
 769				log->l_covered_state = XLOG_STATE_COVER_DONE2;
 770		}
 771		/* FALLTHRU */
 772	default:
 773		needed = 1;
 774		break;
 775	}
 776	spin_unlock(&log->l_icloglock);
 777	return needed;
 778}
 779
 780/******************************************************************************
 781 *
 782 *	local routines
 783 *
 784 ******************************************************************************
 785 */
 786
 787/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
 788 * The log manager must keep track of the last LR which was committed
 789 * to disk.  The lsn of this LR will become the new tail_lsn whenever
 790 * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
 791 * the situation where stuff could be written into the log but nothing
 792 * was ever in the AIL when asked.  Eventually, we panic since the
 793 * tail hits the head.
 794 *
 795 * We may be holding the log iclog lock upon entering this routine.
 796 */
 797xfs_lsn_t
 798xlog_assign_tail_lsn(
 799	struct xfs_mount	*mp)
 800{
 801	xfs_lsn_t		tail_lsn;
 802	struct log		*log = mp->m_log;
 803
 804	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 805	if (!tail_lsn)
 806		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 807
 808	atomic64_set(&log->l_tail_lsn, tail_lsn);
 809	return tail_lsn;
 810}
 811
 812/*
 813 * Return the space in the log between the tail and the head.  The head
 814 * is passed in the cycle/bytes formal parms.  In the special case where
815 * the reserve head has wrapped past the tail, this calculation is no
 816 * longer valid.  In this case, just return 0 which means there is no space
 817 * in the log.  This works for all places where this function is called
 818 * with the reserve head.  Of course, if the write head were to ever
 819 * wrap the tail, we should blow up.  Rather than catch this case here,
 820 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 821 *
 822 * This code also handles the case where the reservation head is behind
 823 * the tail.  The details of this case are described below, but the end
 824 * result is that we return the size of the log as the amount of space left.
 825 */
 826STATIC int
 827xlog_space_left(
 828	struct log	*log,
 829	atomic64_t	*head)
 830{
 831	int		free_bytes;
 832	int		tail_bytes;
 833	int		tail_cycle;
 834	int		head_cycle;
 835	int		head_bytes;
 836
 837	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
 838	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
 839	tail_bytes = BBTOB(tail_bytes);
 840	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
 841		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
 842	else if (tail_cycle + 1 < head_cycle)
 843		return 0;
 844	else if (tail_cycle < head_cycle) {
 845		ASSERT(tail_cycle == (head_cycle - 1));
 846		free_bytes = tail_bytes - head_bytes;
 847	} else {
 848		/*
 849		 * The reservation head is behind the tail.
 850		 * In this case we just want to return the size of the
 851		 * log as the amount of space left.
 852		 */
 853		xfs_alert(log->l_mp,
 854			"xlog_space_left: head behind tail\n"
 855			"  tail_cycle = %d, tail_bytes = %d\n"
 856			"  GH   cycle = %d, GH   bytes = %d",
 857			tail_cycle, tail_bytes, head_cycle, head_bytes);
 858		ASSERT(0);
 859		free_bytes = log->l_logsize;
 860	}
 861	return free_bytes;
 862}
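/*
 * Worked examples for the cases above (illustrative byte values): with
 * l_logsize = 64000 and the tail at (cycle 4, 6000 bytes):
 *
 *	head (4, 9000):  same cycle, free = 64000 - (9000 - 6000) = 61000
 *	head (5, 1000):  head one cycle ahead, free = 6000 - 1000 = 5000
 *	head (6, anything): reserve head wrapped past the tail, return 0
 *	head (3, anything): head behind tail, complain and return l_logsize
 */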
 863
 864
 865/*
 866 * Log function which is called when an io completes.
 867 *
 868 * The log manager needs its own routine, in order to control what
 869 * happens with the buffer after the write completes.
 870 */
 871void
 872xlog_iodone(xfs_buf_t *bp)
 873{
 874	xlog_in_core_t	*iclog = bp->b_fspriv;
 875	xlog_t		*l = iclog->ic_log;
 876	int		aborted = 0;
 877
 878	/*
 879	 * Race to shutdown the filesystem if we see an error.
 880	 */
 881	if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
 882			XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
 883		xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
 884		XFS_BUF_STALE(bp);
 885		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
 886		/*
 887		 * This flag will be propagated to the trans-committed
 888		 * callback routines to let them know that the log-commit
 889		 * didn't succeed.
 890		 */
 891		aborted = XFS_LI_ABORTED;
 892	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
 893		aborted = XFS_LI_ABORTED;
 894	}
 895
 896	/* log I/O is always issued ASYNC */
 897	ASSERT(XFS_BUF_ISASYNC(bp));
 898	xlog_state_done_syncing(iclog, aborted);
 899	/*
 900	 * do not reference the buffer (bp) here as we could race
 901	 * with it being freed after writing the unmount record to the
 902	 * log.
 903	 */
 904
 905}	/* xlog_iodone */
 906
 907/*
 908 * Return size of each in-core log record buffer.
 909 *
 910 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 911 *
 912 * If the filesystem blocksize is too large, we may need to choose a
 913 * larger size since the directory code currently logs entire blocks.
 914 */
 915
 916STATIC void
 917xlog_get_iclog_buffer_size(xfs_mount_t	*mp,
 918			   xlog_t	*log)
 919{
 920	int size;
 921	int xhdrs;
 922
 923	if (mp->m_logbufs <= 0)
 924		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
 925	else
 926		log->l_iclog_bufs = mp->m_logbufs;
 927
 928	/*
 929	 * Buffer size passed in from mount system call.
 930	 */
 931	if (mp->m_logbsize > 0) {
 932		size = log->l_iclog_size = mp->m_logbsize;
 933		log->l_iclog_size_log = 0;
 934		while (size != 1) {
 935			log->l_iclog_size_log++;
 936			size >>= 1;
 937		}
 938
 939		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
 940			/* # headers = size / 32k
 941			 * one header holds cycles from 32k of data
 942			 */
 943
 944			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
 945			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
 946				xhdrs++;
 947			log->l_iclog_hsize = xhdrs << BBSHIFT;
 948			log->l_iclog_heads = xhdrs;
 949		} else {
 950			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
 951			log->l_iclog_hsize = BBSIZE;
 952			log->l_iclog_heads = 1;
 953		}
 954		goto done;
 955	}
 956
 957	/* All machines use 32kB buffers by default. */
 958	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
 959	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
 960
 961	/* the default log size is 16k or 32k which is one header sector */
 962	log->l_iclog_hsize = BBSIZE;
 963	log->l_iclog_heads = 1;
 964
 965done:
 966	/* are we being asked to make the sizes selected above visible? */
 967	if (mp->m_logbufs == 0)
 968		mp->m_logbufs = log->l_iclog_bufs;
 969	if (mp->m_logbsize == 0)
 970		mp->m_logbsize = log->l_iclog_size;
 971}	/* xlog_get_iclog_buffer_size */
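/*
 * Example (illustrative): a v2 log mounted with logbsize=64k yields
 * xhdrs = 65536 / XLOG_HEADER_CYCLE_SIZE = 2, i.e. l_iclog_hsize covers
 * two 512 byte header sectors, each holding the saved cycle words for
 * 32k of record data.  Without the v2 feature bit the buffer is capped
 * at XLOG_BIG_RECORD_BSIZE and a single BBSIZE header is used.
 */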
 972
 973
 974/*
 975 * This routine initializes some of the log structure for a given mount point.
 976 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 977 * some other stuff may be filled in too.
 978 */
 979STATIC xlog_t *
 980xlog_alloc_log(xfs_mount_t	*mp,
 981	       xfs_buftarg_t	*log_target,
 982	       xfs_daddr_t	blk_offset,
 983	       int		num_bblks)
 984{
 985	xlog_t			*log;
 986	xlog_rec_header_t	*head;
 987	xlog_in_core_t		**iclogp;
 988	xlog_in_core_t		*iclog, *prev_iclog=NULL;
 989	xfs_buf_t		*bp;
 990	int			i;
 991	int			error = ENOMEM;
 992	uint			log2_size = 0;
 993
 994	log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
 995	if (!log) {
 996		xfs_warn(mp, "Log allocation failed: No memory!");
 997		goto out;
 998	}
 999
1000	log->l_mp	   = mp;
1001	log->l_targ	   = log_target;
1002	log->l_logsize     = BBTOB(num_bblks);
1003	log->l_logBBstart  = blk_offset;
1004	log->l_logBBsize   = num_bblks;
1005	log->l_covered_state = XLOG_STATE_COVER_IDLE;
1006	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
1007
1008	log->l_prev_block  = -1;
1009	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1010	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1011	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1012	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
1013	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
1014	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
1015	INIT_LIST_HEAD(&log->l_reserveq);
1016	INIT_LIST_HEAD(&log->l_writeq);
1017	spin_lock_init(&log->l_grant_reserve_lock);
1018	spin_lock_init(&log->l_grant_write_lock);
1019
1020	error = EFSCORRUPTED;
1021	if (xfs_sb_version_hassector(&mp->m_sb)) {
1022	        log2_size = mp->m_sb.sb_logsectlog;
1023		if (log2_size < BBSHIFT) {
1024			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1025				log2_size, BBSHIFT);
1026			goto out_free_log;
1027		}
1028
1029	        log2_size -= BBSHIFT;
1030		if (log2_size > mp->m_sectbb_log) {
1031			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1032				log2_size, mp->m_sectbb_log);
1033			goto out_free_log;
1034		}
1035
1036		/* for larger sector sizes, must have v2 or external log */
1037		if (log2_size && log->l_logBBstart > 0 &&
1038			    !xfs_sb_version_haslogv2(&mp->m_sb)) {
1039			xfs_warn(mp,
1040		"log sector size (0x%x) invalid for configuration.",
1041				log2_size);
1042			goto out_free_log;
1043		}
1044	}
1045	log->l_sectBBsize = 1 << log2_size;
1046
1047	xlog_get_iclog_buffer_size(mp, log);
1048
1049	error = ENOMEM;
1050	bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
1051	if (!bp)
1052		goto out_free_log;
1053	bp->b_iodone = xlog_iodone;
1054	ASSERT(xfs_buf_islocked(bp));
1055	log->l_xbuf = bp;
1056
1057	spin_lock_init(&log->l_icloglock);
1058	init_waitqueue_head(&log->l_flush_wait);
1059
1060	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
1061	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
1062
1063	iclogp = &log->l_iclog;
1064	/*
1065	 * The amount of memory to allocate for the iclog structure is
1066	 * rather funky due to the way the structure is defined.  It is
1067	 * done this way so that we can use different sizes for machines
1068	 * with different amounts of memory.  See the definition of
1069	 * xlog_in_core_t in xfs_log_priv.h for details.
1070	 */
1071	ASSERT(log->l_iclog_size >= 4096);
1072	for (i=0; i < log->l_iclog_bufs; i++) {
1073		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
1074		if (!*iclogp)
1075			goto out_free_iclog;
1076
1077		iclog = *iclogp;
1078		iclog->ic_prev = prev_iclog;
1079		prev_iclog = iclog;
1080
1081		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
1082						log->l_iclog_size, 0);
1083		if (!bp)
1084			goto out_free_iclog;
1085
1086		bp->b_iodone = xlog_iodone;
1087		iclog->ic_bp = bp;
1088		iclog->ic_data = bp->b_addr;
1089#ifdef DEBUG
1090		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1091#endif
1092		head = &iclog->ic_header;
1093		memset(head, 0, sizeof(xlog_rec_header_t));
1094		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1095		head->h_version = cpu_to_be32(
1096			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1097		head->h_size = cpu_to_be32(log->l_iclog_size);
1098		/* new fields */
1099		head->h_fmt = cpu_to_be32(XLOG_FMT);
1100		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1101
1102		iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
1103		iclog->ic_state = XLOG_STATE_ACTIVE;
1104		iclog->ic_log = log;
1105		atomic_set(&iclog->ic_refcnt, 0);
1106		spin_lock_init(&iclog->ic_callback_lock);
1107		iclog->ic_callback_tail = &(iclog->ic_callback);
1108		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1109
1110		ASSERT(xfs_buf_islocked(iclog->ic_bp));
1111		init_waitqueue_head(&iclog->ic_force_wait);
1112		init_waitqueue_head(&iclog->ic_write_wait);
1113
1114		iclogp = &iclog->ic_next;
1115	}
1116	*iclogp = log->l_iclog;			/* complete ring */
1117	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */
1118
1119	error = xlog_cil_init(log);
1120	if (error)
1121		goto out_free_iclog;
1122	return log;
1123
1124out_free_iclog:
1125	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1126		prev_iclog = iclog->ic_next;
1127		if (iclog->ic_bp)
1128			xfs_buf_free(iclog->ic_bp);
1129		kmem_free(iclog);
1130	}
1131	spinlock_destroy(&log->l_icloglock);
1132	xfs_buf_free(log->l_xbuf);
1133out_free_log:
1134	kmem_free(log);
1135out:
1136	return ERR_PTR(-error);
1137}	/* xlog_alloc_log */
1138
1139
1140/*
1141 * Write out the commit record of a transaction associated with the given
1142 * ticket.  Return the lsn of the commit record.
1143 */
1144STATIC int
1145xlog_commit_record(
1146	struct log		*log,
1147	struct xlog_ticket	*ticket,
1148	struct xlog_in_core	**iclog,
1149	xfs_lsn_t		*commitlsnp)
1150{
1151	struct xfs_mount *mp = log->l_mp;
1152	int	error;
1153	struct xfs_log_iovec reg = {
1154		.i_addr = NULL,
1155		.i_len = 0,
1156		.i_type = XLOG_REG_TYPE_COMMIT,
1157	};
1158	struct xfs_log_vec vec = {
1159		.lv_niovecs = 1,
1160		.lv_iovecp = &reg,
1161	};
1162
1163	ASSERT_ALWAYS(iclog);
1164	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1165					XLOG_COMMIT_TRANS);
1166	if (error)
1167		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1168	return error;
1169}
1170
1171/*
1172 * Push on the buffer cache code if we ever use more than 75% of the on-disk
1173 * log space.  This code pushes on the lsn which would supposedly free up
1174 * the 25% which we want to leave free.  We may need to adopt a policy which
1175 * pushes on an lsn which is further along in the log once we reach the high
1176 * water mark.  In this manner, we would be creating a low water mark.
1177 */
1178STATIC void
1179xlog_grant_push_ail(
1180	struct log	*log,
1181	int		need_bytes)
1182{
1183	xfs_lsn_t	threshold_lsn = 0;
1184	xfs_lsn_t	last_sync_lsn;
1185	int		free_blocks;
1186	int		free_bytes;
1187	int		threshold_block;
1188	int		threshold_cycle;
1189	int		free_threshold;
1190
1191	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1192
1193	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
1194	free_blocks = BTOBBT(free_bytes);
1195
1196	/*
1197	 * Set the threshold for the minimum number of free blocks in the
1198	 * log to the maximum of what the caller needs, one quarter of the
1199	 * log, and 256 blocks.
1200	 */
1201	free_threshold = BTOBB(need_bytes);
1202	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1203	free_threshold = MAX(free_threshold, 256);
1204	if (free_blocks >= free_threshold)
1205		return;
1206
1207	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1208						&threshold_block);
1209	threshold_block += free_threshold;
1210	if (threshold_block >= log->l_logBBsize) {
1211		threshold_block -= log->l_logBBsize;
1212		threshold_cycle += 1;
1213	}
1214	threshold_lsn = xlog_assign_lsn(threshold_cycle,
1215					threshold_block);
1216	/*
1217	 * Don't pass in an lsn greater than the lsn of the last
1218	 * log record known to be on disk. Use a snapshot of the last sync lsn
1219	 * so that it doesn't change between the compare and the set.
1220	 */
1221	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1222	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1223		threshold_lsn = last_sync_lsn;
1224
1225	/*
1226	 * Get the transaction layer to kick the dirty buffers out to
1227	 * disk asynchronously. No point in trying to do this if
1228	 * the filesystem is shutting down.
1229	 */
1230	if (!XLOG_FORCED_SHUTDOWN(log))
1231		xfs_ail_push(log->l_ailp, threshold_lsn);
1232}
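/*
 * Worked example of the threshold above (illustrative numbers): for a
 * 2048 block log with 300 free blocks and need_bytes spanning 10 blocks,
 * free_threshold = max(10, 2048 >> 2, 256) = 512.  Since 300 < 512 we
 * push: the threshold lsn is the tail lsn advanced by 512 blocks
 * (wrapping into the next cycle if required), clamped to l_last_sync_lsn,
 * and handed to xfs_ail_push() to write back metadata and move the tail.
 */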
1233
1234/*
1235 * The bdstrat callback function for log bufs. This gives us a central
1236 * place to trap bufs in case we get hit by a log I/O error and need to
1237 * shutdown. Actually, in practice, even when we didn't get a log error,
1238 * we transition the iclogs to IOERROR state *after* flushing all existing
1239 * iclogs to disk. This is because we don't want any new transactions to be
1240 * started or completed afterwards.
1241 */
1242STATIC int
1243xlog_bdstrat(
1244	struct xfs_buf		*bp)
1245{
1246	struct xlog_in_core	*iclog = bp->b_fspriv;
1247
1248	if (iclog->ic_state & XLOG_STATE_IOERROR) {
1249		xfs_buf_ioerror(bp, EIO);
1250		XFS_BUF_STALE(bp);
1251		xfs_buf_ioend(bp, 0);
1252		/*
1253		 * It would seem logical to return EIO here, but we rely on
1254		 * the log state machine to propagate I/O errors instead of
1255		 * doing it here.
1256		 */
1257		return 0;
1258	}
1259
1260	xfs_buf_iorequest(bp);
1261	return 0;
1262}
1263
1264/*
1265 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
1266 * fashion.  Previously, we should have moved the current iclog
1267 * ptr in the log to point to the next available iclog.  This allows further
1268 * write to continue while this code syncs out an iclog ready to go.
1269 * Before an in-core log can be written out, the data section must be scanned
1270 * to save away the 1st word of each BBSIZE block into the header.  We replace
1271 * it with the current cycle count.  Each BBSIZE block is tagged with the
1272 * cycle count because there is an implicit assumption that drives will
1273 * guarantee that entire 512 byte blocks get written at once.  In other words,
1274 * we can't have part of a 512 byte block written and part not written.  By
1275 * tagging each block, we will know which blocks are valid when recovering
1276 * after an unclean shutdown.
1277 *
1278 * This routine is single threaded on the iclog.  No other thread can be in
1279 * this routine with the same iclog.  Changing contents of iclog can there-
1280 * fore be done without grabbing the state machine lock.  Updating the global
1281 * log will require grabbing the lock though.
1282 *
1283 * The entire log manager uses a logical block numbering scheme.  Only
1284 * log_sync (and then only bwrite()) know about the fact that the log may
1285 * not start with block zero on a given device.  The log block start offset
1286 * is added immediately before calling bwrite().
1287 */
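/*
 * Cycle stamping example (illustrative): if the current cycle is 7, the
 * first four bytes of every 512 byte block in the data section are saved
 * into h_cycle_data[] (or into the extended headers for records larger
 * than 32k) by xlog_pack_data() and overwritten with cpu_to_be32(7).
 * During recovery, a block still carrying an older cycle number marks
 * the point where the last write stopped.
 */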
1288
1289STATIC int
1290xlog_sync(xlog_t		*log,
1291	  xlog_in_core_t	*iclog)
1292{
1293	xfs_caddr_t	dptr;		/* pointer to byte sized element */
1294	xfs_buf_t	*bp;
1295	int		i;
1296	uint		count;		/* byte count of bwrite */
1297	uint		count_init;	/* initial count before roundup */
1298	int		roundoff;       /* roundoff to BB or stripe */
1299	int		split = 0;	/* split write into two regions */
1300	int		error;
1301	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1302
1303	XFS_STATS_INC(xs_log_writes);
1304	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1305
1306	/* Add for LR header */
1307	count_init = log->l_iclog_hsize + iclog->ic_offset;
1308
1309	/* Round out the log write size */
1310	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1311		/* we have a v2 stripe unit to use */
1312		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1313	} else {
1314		count = BBTOB(BTOBB(count_init));
1315	}
1316	roundoff = count - count_init;
1317	ASSERT(roundoff >= 0);
1318	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
1319                roundoff < log->l_mp->m_sb.sb_logsunit)
1320		|| 
1321		(log->l_mp->m_sb.sb_logsunit <= 1 && 
1322		 roundoff < BBTOB(1)));
1323
1324	/* move grant heads by roundoff in sync */
1325	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
1326	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
1327
1328	/* put cycle number in every block */
1329	xlog_pack_data(log, iclog, roundoff); 
1330
1331	/* real byte length */
1332	if (v2) {
1333		iclog->ic_header.h_len =
1334			cpu_to_be32(iclog->ic_offset + roundoff);
1335	} else {
1336		iclog->ic_header.h_len =
1337			cpu_to_be32(iclog->ic_offset);
1338	}
1339
1340	bp = iclog->ic_bp;
1341	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
1342
1343	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1344
1345	/* Do we need to split this write into 2 parts? */
1346	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1347		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1348		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1349		iclog->ic_bwritecnt = 2;	/* split into 2 writes */
1350	} else {
1351		iclog->ic_bwritecnt = 1;
1352	}
1353	XFS_BUF_SET_COUNT(bp, count);
1354	bp->b_fspriv = iclog;
1355	XFS_BUF_ZEROFLAGS(bp);
1356	XFS_BUF_ASYNC(bp);
1357	bp->b_flags |= XBF_SYNCIO;
1358
1359	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1360		bp->b_flags |= XBF_FUA;
1361
1362		/*
1363		 * Flush the data device before flushing the log to make
1364		 * sure all meta data written back from the AIL actually made
1365		 * it to disk before stamping the new log tail LSN into the
1366		 * log buffer.  For an external log we need to issue the
1367		 * flush explicitly, and unfortunately synchronously here;
1368		 * for an internal log we can simply use the block layer
1369		 * state machine for preflushes.
1370		 */
1371		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1372			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1373		else
1374			bp->b_flags |= XBF_FLUSH;
1375	}
1376
1377	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1378	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1379
1380	xlog_verify_iclog(log, iclog, count, B_TRUE);
1381
1382	/* account for log which doesn't start at block #0 */
1383	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1384	/*
1385	 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1386	 * is shutting down.
1387	 */
1388	XFS_BUF_WRITE(bp);
1389
1390	if ((error = xlog_bdstrat(bp))) {
1391		xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
1392				  XFS_BUF_ADDR(bp));
1393		return error;
1394	}
1395	if (split) {
1396		bp = iclog->ic_log->l_xbuf;
1397		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
1398		xfs_buf_associate_memory(bp,
1399				(char *)&iclog->ic_header + count, split);
1400		bp->b_fspriv = iclog;
1401		XFS_BUF_ZEROFLAGS(bp);
1402		XFS_BUF_ASYNC(bp);
1403		bp->b_flags |= XBF_SYNCIO;
1404		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1405			bp->b_flags |= XBF_FUA;
1406		dptr = bp->b_addr;
1407		/*
1408		 * Bump the cycle numbers at the start of each block
1409		 * since this part of the buffer is at the start of
1410		 * a new cycle.  Watch out for the header magic number
1411		 * case, though.
1412		 */
1413		for (i = 0; i < split; i += BBSIZE) {
1414			be32_add_cpu((__be32 *)dptr, 1);
1415			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
1416				be32_add_cpu((__be32 *)dptr, 1);
1417			dptr += BBSIZE;
1418		}
1419
1420		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1421		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1422
1423		/* account for internal log which doesn't start at block #0 */
1424		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1425		XFS_BUF_WRITE(bp);
1426		if ((error = xlog_bdstrat(bp))) {
1427			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
1428					  bp, XFS_BUF_ADDR(bp));
1429			return error;
1430		}
1431	}
1432	return 0;
1433}	/* xlog_sync */
1434
1435
1436/*
1437 * Deallocate a log structure
1438 */
1439STATIC void
1440xlog_dealloc_log(xlog_t *log)
1441{
1442	xlog_in_core_t	*iclog, *next_iclog;
1443	int		i;
1444
1445	xlog_cil_destroy(log);
1446
1447	/*
1448	 * always need to ensure that the extra buffer does not point to memory
1449	 * owned by another log buffer before we free it.
1450	 */
1451	xfs_buf_set_empty(log->l_xbuf, log->l_iclog_size);
1452	xfs_buf_free(log->l_xbuf);
1453
1454	iclog = log->l_iclog;
1455	for (i=0; i<log->l_iclog_bufs; i++) {
1456		xfs_buf_free(iclog->ic_bp);
1457		next_iclog = iclog->ic_next;
1458		kmem_free(iclog);
1459		iclog = next_iclog;
1460	}
1461	spinlock_destroy(&log->l_icloglock);
1462
1463	log->l_mp->m_log = NULL;
1464	kmem_free(log);
1465}	/* xlog_dealloc_log */
1466
1467/*
1468 * Update counters atomically now that memcpy is done.
1469 */
1470/* ARGSUSED */
1471static inline void
1472xlog_state_finish_copy(xlog_t		*log,
1473		       xlog_in_core_t	*iclog,
1474		       int		record_cnt,
1475		       int		copy_bytes)
1476{
1477	spin_lock(&log->l_icloglock);
1478
1479	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1480	iclog->ic_offset += copy_bytes;
1481
1482	spin_unlock(&log->l_icloglock);
1483}	/* xlog_state_finish_copy */
1484
1485
1486
1487
1488/*
1489 * print out info relating to regions written which consume
1490 * the reservation
1491 */
1492void
1493xlog_print_tic_res(
1494	struct xfs_mount	*mp,
1495	struct xlog_ticket	*ticket)
1496{
1497	uint i;
1498	uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1499
1500	/* match with XLOG_REG_TYPE_* in xfs_log.h */
1501	static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1502	    "bformat",
1503	    "bchunk",
1504	    "efi_format",
1505	    "efd_format",
1506	    "iformat",
1507	    "icore",
1508	    "iext",
1509	    "ibroot",
1510	    "ilocal",
1511	    "iattr_ext",
1512	    "iattr_broot",
1513	    "iattr_local",
1514	    "qformat",
1515	    "dquot",
1516	    "quotaoff",
1517	    "LR header",
1518	    "unmount",
1519	    "commit",
1520	    "trans header"
1521	};
1522	static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1523	    "SETATTR_NOT_SIZE",
1524	    "SETATTR_SIZE",
1525	    "INACTIVE",
1526	    "CREATE",
1527	    "CREATE_TRUNC",
1528	    "TRUNCATE_FILE",
1529	    "REMOVE",
1530	    "LINK",
1531	    "RENAME",
1532	    "MKDIR",
1533	    "RMDIR",
1534	    "SYMLINK",
1535	    "SET_DMATTRS",
1536	    "GROWFS",
1537	    "STRAT_WRITE",
1538	    "DIOSTRAT",
1539	    "WRITE_SYNC",
1540	    "WRITEID",
1541	    "ADDAFORK",
1542	    "ATTRINVAL",
1543	    "ATRUNCATE",
1544	    "ATTR_SET",
1545	    "ATTR_RM",
1546	    "ATTR_FLAG",
1547	    "CLEAR_AGI_BUCKET",
1548	    "QM_SBCHANGE",
1549	    "DUMMY1",
1550	    "DUMMY2",
1551	    "QM_QUOTAOFF",
1552	    "QM_DQALLOC",
1553	    "QM_SETQLIM",
1554	    "QM_DQCLUSTER",
1555	    "QM_QINOCREATE",
1556	    "QM_QUOTAOFF_END",
1557	    "SB_UNIT",
1558	    "FSYNC_TS",
1559	    "GROWFSRT_ALLOC",
1560	    "GROWFSRT_ZERO",
1561	    "GROWFSRT_FREE",
1562	    "SWAPEXT"
1563	};
1564
1565	xfs_warn(mp,
1566		"xfs_log_write: reservation summary:\n"
1567		"  trans type  = %s (%u)\n"
1568		"  unit res    = %d bytes\n"
1569		"  current res = %d bytes\n"
1570		"  total reg   = %u bytes (o/flow = %u bytes)\n"
1571		"  ophdrs      = %u (ophdr space = %u bytes)\n"
1572		"  ophdr + reg = %u bytes\n"
1573		"  num regions = %u\n",
1574		((ticket->t_trans_type <= 0 ||
1575		  ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1576		  "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1577		ticket->t_trans_type,
1578		ticket->t_unit_res,
1579		ticket->t_curr_res,
1580		ticket->t_res_arr_sum, ticket->t_res_o_flow,
1581		ticket->t_res_num_ophdrs, ophdr_spc,
1582		ticket->t_res_arr_sum +
1583		ticket->t_res_o_flow + ophdr_spc,
1584		ticket->t_res_num);
1585
1586	for (i = 0; i < ticket->t_res_num; i++) {
1587		uint r_type = ticket->t_res_arr[i].r_type;
1588		xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
1589			    ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1590			    "bad-rtype" : res_type_str[r_type-1]),
1591			    ticket->t_res_arr[i].r_len);
1592	}
1593
1594	xfs_alert_tag(mp, XFS_PTAG_LOGRES,
1595		"xfs_log_write: reservation ran out. Need to up reservation");
1596	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1597}
1598
1599/*
1600 * Calculate the potential space needed by the log vector.  Each region gets
1601 * its own xlog_op_header_t and may need to be double word aligned.
1602 */
1603static int
1604xlog_write_calc_vec_length(
1605	struct xlog_ticket	*ticket,
1606	struct xfs_log_vec	*log_vector)
1607{
1608	struct xfs_log_vec	*lv;
1609	int			headers = 0;
1610	int			len = 0;
1611	int			i;
1612
1613	/* acct for start rec of xact */
1614	if (ticket->t_flags & XLOG_TIC_INITED)
1615		headers++;
1616
1617	for (lv = log_vector; lv; lv = lv->lv_next) {
1618		headers += lv->lv_niovecs;
1619
1620		for (i = 0; i < lv->lv_niovecs; i++) {
1621			struct xfs_log_iovec	*vecp = &lv->lv_iovecp[i];
1622
1623			len += vecp->i_len;
1624			xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
1625		}
1626	}
1627
1628	ticket->t_res_num_ophdrs += headers;
1629	len += headers * sizeof(struct xlog_op_header);
1630
1631	return len;
1632}
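/*
 * Example (illustrative): an INITED ticket writing one log vector with
 * two regions of 28 and 100 bytes gives headers = 1 (start record) + 2,
 * so len = 128 + 3 * sizeof(struct xlog_op_header).  Continuation
 * headers added when a region straddles iclogs are accounted separately
 * in xlog_write_setup_copy().
 */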
1633
1634/*
1635 * If first write for transaction, insert start record.  We can't be trying to
1636 * commit if we are inited.  We can't have any "partial_copy" if we are inited.
1637 */
1638static int
1639xlog_write_start_rec(
1640	struct xlog_op_header	*ophdr,
1641	struct xlog_ticket	*ticket)
1642{
1643	if (!(ticket->t_flags & XLOG_TIC_INITED))
1644		return 0;
1645
1646	ophdr->oh_tid	= cpu_to_be32(ticket->t_tid);
1647	ophdr->oh_clientid = ticket->t_clientid;
1648	ophdr->oh_len = 0;
1649	ophdr->oh_flags = XLOG_START_TRANS;
1650	ophdr->oh_res2 = 0;
1651
1652	ticket->t_flags &= ~XLOG_TIC_INITED;
1653
1654	return sizeof(struct xlog_op_header);
1655}
1656
1657static xlog_op_header_t *
1658xlog_write_setup_ophdr(
1659	struct log		*log,
1660	struct xlog_op_header	*ophdr,
1661	struct xlog_ticket	*ticket,
1662	uint			flags)
1663{
1664	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
1665	ophdr->oh_clientid = ticket->t_clientid;
1666	ophdr->oh_res2 = 0;
1667
1668	/* are we copying a commit or unmount record? */
1669	ophdr->oh_flags = flags;
1670
1671	/*
1672	 * We've seen logs corrupted with bad transaction client ids.  This
1673	 * makes sure that XFS doesn't generate them.  If we find a bad client
1674	 * id, turn it into an EIO and shut down the filesystem.
1675	 */
1676	switch (ophdr->oh_clientid)  {
1677	case XFS_TRANSACTION:
1678	case XFS_VOLUME:
1679	case XFS_LOG:
1680		break;
1681	default:
1682		xfs_warn(log->l_mp,
1683			"Bad XFS transaction clientid 0x%x in ticket 0x%p",
1684			ophdr->oh_clientid, ticket);
1685		return NULL;
1686	}
1687
1688	return ophdr;
1689}
1690
1691/*
1692	 * Set up the parameters of the region copy into the log.  This has to
1693	 * handle a region write that is split across multiple log buffers - this
1694	 * state is kept external to this function so that the code can be
1695	 * written in an obvious, self-documenting manner.
1696 */
1697static int
1698xlog_write_setup_copy(
1699	struct xlog_ticket	*ticket,
1700	struct xlog_op_header	*ophdr,
1701	int			space_available,
1702	int			space_required,
1703	int			*copy_off,
1704	int			*copy_len,
1705	int			*last_was_partial_copy,
1706	int			*bytes_consumed)
1707{
1708	int			still_to_copy;
1709
1710	still_to_copy = space_required - *bytes_consumed;
1711	*copy_off = *bytes_consumed;
1712
1713	if (still_to_copy <= space_available) {
1714		/* write of region completes here */
1715		*copy_len = still_to_copy;
1716		ophdr->oh_len = cpu_to_be32(*copy_len);
1717		if (*last_was_partial_copy)
1718			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
1719		*last_was_partial_copy = 0;
1720		*bytes_consumed = 0;
1721		return 0;
1722	}
1723
1724	/* partial write of region, needs extra log op header reservation */
1725	*copy_len = space_available;
1726	ophdr->oh_len = cpu_to_be32(*copy_len);
1727	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
1728	if (*last_was_partial_copy)
1729		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
1730	*bytes_consumed += *copy_len;
1731	(*last_was_partial_copy)++;
1732
1733	/* account for new log op header */
1734	ticket->t_curr_res -= sizeof(struct xlog_op_header);
1735	ticket->t_res_num_ophdrs++;
1736
1737	return sizeof(struct xlog_op_header);
1738}
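
/*
 * Worked example of a split region (illustrative numbers): copying a
 * 280 byte region through iclogs with 100 and then 150 bytes of space
 * available, followed by a roomy third iclog:
 *
 *	call 1: copy_off = 0,   copy_len = 100, CONTINUE_TRANS
 *	call 2: copy_off = 100, copy_len = 150, CONTINUE_TRANS|WAS_CONT_TRANS
 *	call 3: copy_off = 250, copy_len = 30,  END_TRANS|WAS_CONT_TRANS
 *
 * Calls 1 and 2 each consume one extra op header from the ticket and
 * return sizeof(struct xlog_op_header); call 3 clears the partial copy
 * state and returns 0.
 */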
1739
1740static int
1741xlog_write_copy_finish(
1742	struct log		*log,
1743	struct xlog_in_core	*iclog,
1744	uint			flags,
1745	int			*record_cnt,
1746	int			*data_cnt,
1747	int			*partial_copy,
1748	int			*partial_copy_len,
1749	int			log_offset,
1750	struct xlog_in_core	**commit_iclog)
1751{
1752	if (*partial_copy) {
1753		/*
1754		 * This iclog has already been marked WANT_SYNC by
1755		 * xlog_state_get_iclog_space.
1756		 */
1757		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1758		*record_cnt = 0;
1759		*data_cnt = 0;
1760		return xlog_state_release_iclog(log, iclog);
1761	}
1762
1763	*partial_copy = 0;
1764	*partial_copy_len = 0;
1765
1766	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
1767		/* no more space in this iclog - push it. */
1768		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1769		*record_cnt = 0;
1770		*data_cnt = 0;
1771
1772		spin_lock(&log->l_icloglock);
1773		xlog_state_want_sync(log, iclog);
1774		spin_unlock(&log->l_icloglock);
1775
1776		if (!commit_iclog)
1777			return xlog_state_release_iclog(log, iclog);
1778		ASSERT(flags & XLOG_COMMIT_TRANS);
1779		*commit_iclog = iclog;
1780	}
1781
1782	return 0;
1783}
1784
1785/*
1786 * Write some region out to in-core log
1787 *
1788 * This will be called when writing externally provided regions or when
1789 * writing out a commit record for a given transaction.
1790 *
1791 * General algorithm:
1792 *	1. Find total length of this write.  This may include adding to the
1793 *		lengths passed in.
1794 *	2. Check whether we violate the ticket's reservation.
1795 *	3. While writing to this iclog
1796 *	    A. Reserve as much space in this iclog as we can get
1797 *	    B. If this is first write, save away start lsn
1798 *	    C. While writing this region:
1799 *		1. If first write of transaction, write start record
1800 *		2. Write log operation header (header per region)
1801 *		3. Find out if we can fit entire region into this iclog
1802 *		4. Potentially, verify destination memcpy ptr
1803 *		5. Memcpy (partial) region
1804 *		6. If partial copy, release iclog; otherwise, continue
1805 *			copying more regions into current iclog
1806 *	4. Mark want sync bit (in simulation mode)
1807 *	5. Release iclog for potential flush to on-disk log.
1808 *
1809 * ERRORS:
1810 * 1.	Panic if reservation is overrun.  This should never happen since
1811 *	reservation amounts are generated internal to the filesystem.
1812 * NOTES:
1813 * 1. Tickets are single threaded data structures.
1814 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
1815 *	syncing routine.  When a single log_write region needs to span
1816 *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
1817 *	on all log operation writes which don't contain the end of the
1818 *	region.  The XLOG_END_TRANS bit is used for the in-core log
1819 *	operation which contains the end of the continued log_write region.
1820 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
1821 *	we don't really know exactly how much space will be used.  As a result,
1822 *	we don't update ic_offset until the end when we know exactly how many
1823 *	bytes have been written out.
1824 */
1825int
1826xlog_write(
1827	struct log		*log,
1828	struct xfs_log_vec	*log_vector,
1829	struct xlog_ticket	*ticket,
1830	xfs_lsn_t		*start_lsn,
1831	struct xlog_in_core	**commit_iclog,
1832	uint			flags)
1833{
1834	struct xlog_in_core	*iclog = NULL;
1835	struct xfs_log_iovec	*vecp;
1836	struct xfs_log_vec	*lv;
1837	int			len;
1838	int			index;
1839	int			partial_copy = 0;
1840	int			partial_copy_len = 0;
1841	int			contwr = 0;
1842	int			record_cnt = 0;
1843	int			data_cnt = 0;
1844	int			error;
1845
1846	*start_lsn = 0;
1847
1848	len = xlog_write_calc_vec_length(ticket, log_vector);
1849	if (log->l_cilp) {
1850		/*
1851		 * Region headers and bytes are already accounted for.
1852		 * We only need to take into account start records and
1853		 * split regions in this function.
1854		 */
1855		if (ticket->t_flags & XLOG_TIC_INITED)
1856			ticket->t_curr_res -= sizeof(xlog_op_header_t);
1857
1858		/*
1859		 * Commit record headers need to be accounted for. These
1860		 * come in as separate writes so are easy to detect.
1861		 */
1862		if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
1863			ticket->t_curr_res -= sizeof(xlog_op_header_t);
1864	} else
1865		ticket->t_curr_res -= len;
1866
1867	if (ticket->t_curr_res < 0)
1868		xlog_print_tic_res(log->l_mp, ticket);
1869
1870	index = 0;
1871	lv = log_vector;
1872	vecp = lv->lv_iovecp;
1873	while (lv && index < lv->lv_niovecs) {
1874		void		*ptr;
1875		int		log_offset;
1876
1877		error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
1878						   &contwr, &log_offset);
1879		if (error)
1880			return error;
1881
1882		ASSERT(log_offset <= iclog->ic_size - 1);
1883		ptr = iclog->ic_datap + log_offset;
1884
1885		/* start_lsn is the first lsn written to. That's all we need. */
1886		if (!*start_lsn)
1887			*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
1888
1889		/*
1890		 * This loop writes out as many regions as can fit in the amount
1891		 * of space which was allocated by xlog_state_get_iclog_space().
1892		 */
1893		while (lv && index < lv->lv_niovecs) {
1894			struct xfs_log_iovec	*reg = &vecp[index];
1895			struct xlog_op_header	*ophdr;
1896			int			start_rec_copy;
1897			int			copy_len;
1898			int			copy_off;
1899
1900			ASSERT(reg->i_len % sizeof(__int32_t) == 0);
1901			ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
1902
1903			start_rec_copy = xlog_write_start_rec(ptr, ticket);
1904			if (start_rec_copy) {
1905				record_cnt++;
1906				xlog_write_adv_cnt(&ptr, &len, &log_offset,
1907						   start_rec_copy);
1908			}
1909
1910			ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
1911			if (!ophdr)
1912				return XFS_ERROR(EIO);
1913
1914			xlog_write_adv_cnt(&ptr, &len, &log_offset,
1915					   sizeof(struct xlog_op_header));
1916
1917			len += xlog_write_setup_copy(ticket, ophdr,
1918						     iclog->ic_size-log_offset,
1919						     reg->i_len,
1920						     &copy_off, &copy_len,
1921						     &partial_copy,
1922						     &partial_copy_len);
1923			xlog_verify_dest_ptr(log, ptr);
1924
1925			/* copy region */
1926			ASSERT(copy_len >= 0);
1927			memcpy(ptr, reg->i_addr + copy_off, copy_len);
1928			xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
1929
1930			copy_len += start_rec_copy + sizeof(xlog_op_header_t);
1931			record_cnt++;
1932			data_cnt += contwr ? copy_len : 0;
1933
1934			error = xlog_write_copy_finish(log, iclog, flags,
1935						       &record_cnt, &data_cnt,
1936						       &partial_copy,
1937						       &partial_copy_len,
1938						       log_offset,
1939						       commit_iclog);
1940			if (error)
1941				return error;
1942
1943			/*
1944			 * If we had a partial copy, we need to get more iclog
1945			 * space but we don't want to increment the region
1946			 * index because there is still more in this region to
1947			 * write.
1948			 *
1949			 * If we completed writing this region, and we flushed
1950			 * the iclog (indicated by resetting of the record
1951			 * count), then we also need to get more log space. If
1952			 * this was the last record, though, we are done and
1953			 * can just return.
1954			 */
1955			if (partial_copy)
1956				break;
1957
1958			if (++index == lv->lv_niovecs) {
1959				lv = lv->lv_next;
1960				index = 0;
1961				if (lv)
1962					vecp = lv->lv_iovecp;
1963			}
1964			if (record_cnt == 0) {
1965				if (!lv)
1966					return 0;
1967				break;
1968			}
1969		}
1970	}
1971
1972	ASSERT(len == 0);
1973
1974	xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
1975	if (!commit_iclog)
1976		return xlog_state_release_iclog(log, iclog);
1977
1978	ASSERT(flags & XLOG_COMMIT_TRANS);
1979	*commit_iclog = iclog;
1980	return 0;
1981}
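
/*
 * Illustrative caller sketch (hypothetical - "my_buf" and "my_len" are
 * made-up names and the ticket is assumed to already carry a sufficient
 * reservation).  Writing a single externally provided region, with no
 * commit record requested:
 *
 *	struct xfs_log_iovec	reg = {
 *		.i_addr		= my_buf,
 *		.i_len		= my_len,
 *		.i_type		= XLOG_REG_TYPE_TRANSHDR,
 *	};
 *	struct xfs_log_vec	lv = {
 *		.lv_niovecs	= 1,
 *		.lv_iovecp	= &reg,
 *	};
 *	xfs_lsn_t		start_lsn;
 *	int			error;
 *
 *	error = xlog_write(log, &lv, ticket, &start_lsn, NULL, 0);
 *
 * Note that my_len must be a multiple of sizeof(__int32_t) to satisfy
 * the alignment asserts in the copy loop above.
 */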
1982
1983
1984/*****************************************************************************
1985 *
1986 *		State Machine functions
1987 *
1988 *****************************************************************************
1989 */
1990
1991/* Clean iclogs starting from the head.  This ordering must be
1992 * maintained, so an iclog doesn't become ACTIVE beyond one that
1993 * is SYNCING.  This is also required to maintain the notion that we use
1994 * an ordered wait queue to hold off would-be writers to the log when every
1995 * iclog is trying to sync to disk.
1996 *
1997 * State Change: DIRTY -> ACTIVE
1998 */
1999STATIC void
2000xlog_state_clean_log(xlog_t *log)
2001{
2002	xlog_in_core_t	*iclog;
2003	int changed = 0;
2004
2005	iclog = log->l_iclog;
2006	do {
2007		if (iclog->ic_state == XLOG_STATE_DIRTY) {
2008			iclog->ic_state	= XLOG_STATE_ACTIVE;
2009			iclog->ic_offset       = 0;
2010			ASSERT(iclog->ic_callback == NULL);
2011			/*
2012			 * If the number of ops in this iclog indicates it just
2013			 * contains the dummy transaction, we can
2014			 * change state into IDLE (the second time around).
2015			 * Otherwise we should change the state to indicate
2016			 * that we NEED a dummy.
2017			 * We don't need to cover the dummy.
2018			 */
2019			if (!changed &&
2020			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2021			   		XLOG_COVER_OPS)) {
2022				changed = 1;
2023			} else {
2024				/*
2025				 * We have two dirty iclogs so start over.
2026				 * This could also mean the number of ops
2027				 * indicates this is not the dummy going out.
2028				 */
2029				changed = 2;
2030			}
2031			iclog->ic_header.h_num_logops = 0;
2032			memset(iclog->ic_header.h_cycle_data, 0,
2033			      sizeof(iclog->ic_header.h_cycle_data));
2034			iclog->ic_header.h_lsn = 0;
2035		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2036			/* do nothing */;
2037		else
2038			break;	/* stop cleaning */
2039		iclog = iclog->ic_next;
2040	} while (iclog != log->l_iclog);
2041
2042	/* log is locked when we are called */
2043	/*
2044	 * Change state for the dummy log recording.
2045	 * We usually go to NEED. But we go to NEED2 if the change indicates
2046	 * we are done writing the dummy record.
2047	 * If we are done with the second dummy record (DONE2), then
2048	 * we go to IDLE.
2049	 */
2050	if (changed) {
2051		switch (log->l_covered_state) {
2052		case XLOG_STATE_COVER_IDLE:
2053		case XLOG_STATE_COVER_NEED:
2054		case XLOG_STATE_COVER_NEED2:
2055			log->l_covered_state = XLOG_STATE_COVER_NEED;
2056			break;
2057
2058		case XLOG_STATE_COVER_DONE:
2059			if (changed == 1)
2060				log->l_covered_state = XLOG_STATE_COVER_NEED2;
2061			else
2062				log->l_covered_state = XLOG_STATE_COVER_NEED;
2063			break;
2064
2065		case XLOG_STATE_COVER_DONE2:
2066			if (changed == 1)
2067				log->l_covered_state = XLOG_STATE_COVER_IDLE;
2068			else
2069				log->l_covered_state = XLOG_STATE_COVER_NEED;
2070			break;
2071
2072		default:
2073			ASSERT(0);
2074		}
2075	}
2076}	/* xlog_state_clean_log */
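
/*
 * Illustrative covering sequence (a sketch of the state walk above; the
 * NEED -> DONE transitions happen elsewhere, when a dummy record is
 * issued and synced):
 *
 *	COVER_NEED  --(dummy 1 synced)----------------> COVER_DONE
 *	COVER_DONE  --(cleaned here, changed == 1)----> COVER_NEED2
 *	COVER_NEED2 --(dummy 2 synced)----------------> COVER_DONE2
 *	COVER_DONE2 --(cleaned here, changed == 1)----> COVER_IDLE
 *
 * Any other log activity (changed == 2) drops the state back to
 * COVER_NEED and the covering dance starts over.
 */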
2077
2078STATIC xfs_lsn_t
2079xlog_get_lowest_lsn(
2080	xlog_t		*log)
2081{
2082	xlog_in_core_t  *lsn_log;
2083	xfs_lsn_t	lowest_lsn, lsn;
2084
2085	lsn_log = log->l_iclog;
2086	lowest_lsn = 0;
2087	do {
2088	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2089		lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2090		if ((lsn && !lowest_lsn) ||
2091		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2092			lowest_lsn = lsn;
2093		}
2094	    }
2095	    lsn_log = lsn_log->ic_next;
2096	} while (lsn_log != log->l_iclog);
2097	return lowest_lsn;
2098}
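
/*
 * Reminder on the LSN layout used by the comparison above: an LSN packs
 * the cycle number into the upper 32 bits and the block number into the
 * lower 32 bits, e.g.:
 *
 *	xfs_lsn_t lsn = xlog_assign_lsn(5, 0x1000);
 *
 *	CYCLE_LSN(lsn) == 5
 *	BLOCK_LSN(lsn) == 0x1000
 *
 * which is what makes XFS_LSN_CMP() a straightforward comparison of
 * (cycle, block) pairs.
 */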
2099
2100
2101STATIC void
2102xlog_state_do_callback(
2103	xlog_t		*log,
2104	int		aborted,
2105	xlog_in_core_t	*ciclog)
2106{
2107	xlog_in_core_t	   *iclog;
2108	xlog_in_core_t	   *first_iclog;	/* used to know when we've
2109						 * processed all iclogs once */
2110	xfs_log_callback_t *cb, *cb_next;
2111	int		   flushcnt = 0;
2112	xfs_lsn_t	   lowest_lsn;
2113	int		   ioerrors;	/* counter: iclogs with errors */
2114	int		   loopdidcallbacks; /* flag: inner loop did callbacks*/
2115	int		   funcdidcallbacks; /* flag: function did callbacks */
2116	int		   repeats;	/* for issuing console warnings if
2117					 * looping too many times */
2118	int		   wake = 0;
2119
2120	spin_lock(&log->l_icloglock);
2121	first_iclog = iclog = log->l_iclog;
2122	ioerrors = 0;
2123	funcdidcallbacks = 0;
2124	repeats = 0;
2125
2126	do {
2127		/*
2128		 * Scan all iclogs starting with the one pointed to by the
2129		 * log.  Reset this starting point each time the log is
2130		 * unlocked (during callbacks).
2131		 *
2132		 * Keep looping through iclogs until one full pass is made
2133		 * without running any callbacks.
2134		 */
2135		first_iclog = log->l_iclog;
2136		iclog = log->l_iclog;
2137		loopdidcallbacks = 0;
2138		repeats++;
2139
2140		do {
2141
2142			/* skip all iclogs in the ACTIVE & DIRTY states */
2143			if (iclog->ic_state &
2144			    (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2145				iclog = iclog->ic_next;
2146				continue;
2147			}
2148
2149			/*
2150			 * Between marking a filesystem SHUTDOWN and stopping
2151			 * the log, we do flush all iclogs to disk (if there
2152			 * wasn't a log I/O error). So, we do want things to
2153			 * go smoothly in case of just a SHUTDOWN w/o a
2154			 * LOG_IO_ERROR.
2155			 */
2156			if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2157				/*
2158				 * Can only perform callbacks in order.  Since
2159				 * this iclog is not in the DONE_SYNC/
2160				 * DO_CALLBACK state, we skip the rest and
2161				 * just try to clean up.  If we set our iclog
2162				 * to DO_CALLBACK, we will not process it when
2163				 * we retry since a previous iclog is in the
2164				 * CALLBACK and the state cannot change since
2165				 * we are holding the l_icloglock.
2166				 */
2167				if (!(iclog->ic_state &
2168					(XLOG_STATE_DONE_SYNC |
2169						 XLOG_STATE_DO_CALLBACK))) {
2170					if (ciclog && (ciclog->ic_state ==
2171							XLOG_STATE_DONE_SYNC)) {
2172						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2173					}
2174					break;
2175				}
2176				/*
2177				 * We now have an iclog that is in either the
2178				 * DO_CALLBACK or DONE_SYNC states. The other
2179				 * states (WANT_SYNC, SYNCING, or CALLBACK) were
2180				 * caught by the above if and are going to
2181				 * clean (i.e. we aren't doing their callbacks).
2183				 */
2184
2185				/*
2186				 * We will do one more check here to see if we
2187				 * have chased our tail around.
2188				 */
2189
2190				lowest_lsn = xlog_get_lowest_lsn(log);
2191				if (lowest_lsn &&
2192				    XFS_LSN_CMP(lowest_lsn,
2193						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2194					iclog = iclog->ic_next;
2195					continue; /* Leave this iclog for
2196						   * another thread */
2197				}
2198
2199				iclog->ic_state = XLOG_STATE_CALLBACK;
2200
2201
2202				/*
2203				 * update the last_sync_lsn before we drop the
2204				 * icloglock to ensure we are the only one that
2205				 * can update it.
2206				 */
2207				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2208					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2209				atomic64_set(&log->l_last_sync_lsn,
2210					be64_to_cpu(iclog->ic_header.h_lsn));
2211
2212			} else
2213				ioerrors++;
2214
2215			spin_unlock(&log->l_icloglock);
2216
2217			/*
2218			 * Keep processing entries in the callback list until
2219			 * we come around and it is empty.  We need to
2220			 * atomically see that the list is empty and change the
2221			 * state to DIRTY so that we don't miss any more
2222			 * callbacks being added.
2223			 */
2224			spin_lock(&iclog->ic_callback_lock);
2225			cb = iclog->ic_callback;
2226			while (cb) {
2227				iclog->ic_callback_tail = &(iclog->ic_callback);
2228				iclog->ic_callback = NULL;
2229				spin_unlock(&iclog->ic_callback_lock);
2230
2231				/* perform callbacks in the order given */
2232				for (; cb; cb = cb_next) {
2233					cb_next = cb->cb_next;
2234					cb->cb_func(cb->cb_arg, aborted);
2235				}
2236				spin_lock(&iclog->ic_callback_lock);
2237				cb = iclog->ic_callback;
2238			}
2239
2240			loopdidcallbacks++;
2241			funcdidcallbacks++;
2242
2243			spin_lock(&log->l_icloglock);
2244			ASSERT(iclog->ic_callback == NULL);
2245			spin_unlock(&iclog->ic_callback_lock);
2246			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2247				iclog->ic_state = XLOG_STATE_DIRTY;
2248
2249			/*
2250			 * Transition from DIRTY to ACTIVE if applicable.
2251			 * NOP if STATE_IOERROR.
2252			 */
2253			xlog_state_clean_log(log);
2254
2255			/* wake up threads waiting in xfs_log_force() */
2256			wake_up_all(&iclog->ic_force_wait);
2257
2258			iclog = iclog->ic_next;
2259		} while (first_iclog != iclog);
2260
2261		if (repeats > 5000) {
2262			flushcnt += repeats;
2263			repeats = 0;
2264			xfs_warn(log->l_mp,
2265				"%s: possible infinite loop (%d iterations)",
2266				__func__, flushcnt);
2267		}
2268	} while (!ioerrors && loopdidcallbacks);
2269
2270	/*
2271	 * Make one last-gasp attempt to see if iclogs are being left in
2272	 * limbo.
2273	 */
2274#ifdef DEBUG
2275	if (funcdidcallbacks) {
2276		first_iclog = iclog = log->l_iclog;
2277		do {
2278			ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2279			/*
2280			 * Terminate the loop if iclogs are found in states
2281			 * which will cause other threads to clean up iclogs.
2282			 *
2283			 * SYNCING - i/o completion will go through logs
2284			 * DONE_SYNC - interrupt thread should be waiting for
2285			 *              l_icloglock
2286			 * IOERROR - give up hope all ye who enter here
2287			 */
2288			if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2289			    iclog->ic_state == XLOG_STATE_SYNCING ||
2290			    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2291			    iclog->ic_state == XLOG_STATE_IOERROR )
2292				break;
2293			iclog = iclog->ic_next;
2294		} while (first_iclog != iclog);
2295	}
2296#endif
2297
2298	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2299		wake = 1;
2300	spin_unlock(&log->l_icloglock);
2301
2302	if (wake)
2303		wake_up_all(&log->l_flush_wait);
2304}
2305
2306
2307/*
2308 * Finish transitioning this iclog to the dirty state.
2309 *
2310 * Make sure that we completely execute this routine only when this is
2311 * the last call to the iclog.  There is a good chance that iclog flushes,
2312 * when we reach the end of the physical log, get turned into 2 separate
2313 * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2314 * routine.  By using the reference count bwritecnt, we guarantee that only
2315 * the second completion goes through.
2316 *
2317 * Callbacks could take time, so they are done outside the scope of the
2318 * global state machine log lock.
2319 */
2320STATIC void
2321xlog_state_done_syncing(
2322	xlog_in_core_t	*iclog,
2323	int		aborted)
2324{
2325	xlog_t		   *log = iclog->ic_log;
2326
2327	spin_lock(&log->l_icloglock);
2328
2329	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2330	       iclog->ic_state == XLOG_STATE_IOERROR);
2331	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2332	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2333
2334
2335	/*
2336	 * If we got an error, either on the first buffer, or in the case of
2337	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2338	 * and none should ever be attempted to be written to disk
2339	 * again.
2340	 */
2341	if (iclog->ic_state != XLOG_STATE_IOERROR) {
2342		if (--iclog->ic_bwritecnt == 1) {
2343			spin_unlock(&log->l_icloglock);
2344			return;
2345		}
2346		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2347	}
2348
2349	/*
2350	 * Someone could be sleeping prior to writing out the next
2351	 * iclog buffer, we wake them all, one will get to do the
2352	 * I/O, the others get to wait for the result.
2353	 */
2354	wake_up_all(&iclog->ic_write_wait);
2355	spin_unlock(&log->l_icloglock);
2356	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
2357}	/* xlog_state_done_syncing */
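
/*
 * Example of the ic_bwritecnt handshake above (illustrative): an iclog
 * whose write wraps the end of the physical log is issued as two buffer
 * writes and starts with ic_bwritecnt == 2:
 *
 *	first completion:  --ic_bwritecnt == 1, just return
 *	second completion: --ic_bwritecnt == 0, mark DONE_SYNC and run
 *			   the callbacks
 */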
2358
2359
2360/*
2361 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2362 * sleep.  We wait on the flush queue on the head iclog as that should be
2363 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2364 * we will wait here and all new writes will sleep until a sync completes.
2365 *
2366 * The in-core logs are used in a circular fashion. They are not used
2367 * out-of-order even when an iclog past the head is free.
2368 *
2369 * return:
2370 *	* log_offset where xlog_write() can start writing into the in-core
2371 *		log's data space.
2372 *	* in-core log pointer to which xlog_write() should write.
2373 *	* boolean indicating this is a continued write to an in-core log.
2374 *		If this is the last write, then the in-core log's offset field
2375 *		needs to be incremented, depending on the amount of data which
2376 *		is copied.
2377 */
2378STATIC int
2379xlog_state_get_iclog_space(xlog_t	  *log,
2380			   int		  len,
2381			   xlog_in_core_t **iclogp,
2382			   xlog_ticket_t  *ticket,
2383			   int		  *continued_write,
2384			   int		  *logoffsetp)
2385{
2386	int		  log_offset;
2387	xlog_rec_header_t *head;
2388	xlog_in_core_t	  *iclog;
2389	int		  error;
2390
2391restart:
2392	spin_lock(&log->l_icloglock);
2393	if (XLOG_FORCED_SHUTDOWN(log)) {
2394		spin_unlock(&log->l_icloglock);
2395		return XFS_ERROR(EIO);
2396	}
2397
2398	iclog = log->l_iclog;
2399	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2400		XFS_STATS_INC(xs_log_noiclogs);
2401
2402		/* Wait for log writes to have flushed */
2403		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2404		goto restart;
2405	}
2406
2407	head = &iclog->ic_header;
2408
2409	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
2410	log_offset = iclog->ic_offset;
2411
2412	/* On the 1st write to an iclog, figure out lsn.  This works
2413	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2414	 * committing to.  If the offset is set, that's how many blocks
2415	 * must be written.
2416	 */
2417	if (log_offset == 0) {
2418		ticket->t_curr_res -= log->l_iclog_hsize;
2419		xlog_tic_add_region(ticket,
2420				    log->l_iclog_hsize,
2421				    XLOG_REG_TYPE_LRHEADER);
2422		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2423		head->h_lsn = cpu_to_be64(
2424			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2425		ASSERT(log->l_curr_block >= 0);
2426	}
2427
2428	/* If there is enough room to write everything, then do it.  Otherwise,
2429	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2430	 * bit is on, so this will get flushed out.  Don't update ic_offset
2431	 * until you know exactly how many bytes get copied.  Therefore, wait
2432	 * until later to update ic_offset.
2433	 *
2434	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2435	 * can fit into remaining data section.
2436	 */
2437	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2438		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2439
2440		/*
2441		 * If I'm the only one writing to this iclog, sync it to disk.
2442		 * We need to do an atomic compare and decrement here to avoid
2443		 * racing with concurrent atomic_dec_and_lock() calls in
2444		 * xlog_state_release_iclog() when there is more than one
2445		 * reference to the iclog.
2446		 */
2447		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
2448			/* we are the only one */
2449			spin_unlock(&log->l_icloglock);
2450			error = xlog_state_release_iclog(log, iclog);
2451			if (error)
2452				return error;
2453		} else {
2454			spin_unlock(&log->l_icloglock);
2455		}
2456		goto restart;
2457	}
2458
2459	/* Do we have enough room to write the full amount in the remainder
2460	 * of this iclog?  Or must we continue a write on the next iclog and
2461	 * mark this iclog as completely taken?  In the case where we switch
2462	 * iclogs (to mark it taken), this particular iclog will release/sync
2463	 * to disk in xlog_write().
2464	 */
2465	if (len <= iclog->ic_size - iclog->ic_offset) {
2466		*continued_write = 0;
2467		iclog->ic_offset += len;
2468	} else {
2469		*continued_write = 1;
2470		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2471	}
2472	*iclogp = iclog;
2473
2474	ASSERT(iclog->ic_offset <= iclog->ic_size);
2475	spin_unlock(&log->l_icloglock);
2476
2477	*logoffsetp = log_offset;
2478	return 0;
2479}	/* xlog_state_get_iclog_space */
2480
2481/*
2482 * Atomically get the log space required for a log ticket.
2483 *
2484 * Once a ticket gets put onto the reserveq, it will only return after
2485 * the needed reservation is satisfied.
2486 *
2487 * This function is structured so that it has a lock free fast path. This is
2488 * necessary because every new transaction reservation will come through this
2489 * path. Hence any lock will be globally hot if we take it unconditionally on
2490 * every pass.
2491 *
2492 * As tickets are only ever moved on and off the reserveq under the
2493 * l_grant_reserve_lock, we only need to take that lock if we are going
2494 * to add the ticket to the queue and sleep. We can avoid taking the lock if the
2495 * ticket was never added to the reserveq because the t_queue list head will be
2496 * empty and we hold the only reference to it so it can safely be checked
2497 * unlocked.
2498 */
2499STATIC int
2500xlog_grant_log_space(xlog_t	   *log,
2501		     xlog_ticket_t *tic)
2502{
2503	int		 free_bytes;
2504	int		 need_bytes;
2505
2506#ifdef DEBUG
2507	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2508		panic("grant Recovery problem");
2509#endif
2510
2511	trace_xfs_log_grant_enter(log, tic);
2512
2513	need_bytes = tic->t_unit_res;
2514	if (tic->t_flags & XFS_LOG_PERM_RESERV)
2515		need_bytes *= tic->t_ocnt;
2516
2517	/* something is already sleeping; insert new transaction at end */
2518	if (!list_empty_careful(&log->l_reserveq)) {
2519		spin_lock(&log->l_grant_reserve_lock);
2520		/* recheck the queue now we are locked */
2521		if (list_empty(&log->l_reserveq)) {
2522			spin_unlock(&log->l_grant_reserve_lock);
2523			goto redo;
2524		}
2525		list_add_tail(&tic->t_queue, &log->l_reserveq);
2526
2527		trace_xfs_log_grant_sleep1(log, tic);
2528
2529		/*
2530		 * Gotta check this before going to sleep, while we're
2531		 * holding the grant lock.
2532		 */
2533		if (XLOG_FORCED_SHUTDOWN(log))
2534			goto error_return;
2535
2536		XFS_STATS_INC(xs_sleep_logspace);
2537		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
2538
2539		/*
2540		 * If we got an error, and the filesystem is shutting down,
2541		 * we'll catch it down below. So just continue...
2542		 */
2543		trace_xfs_log_grant_wake1(log, tic);
2544	}
2545
2546redo:
2547	if (XLOG_FORCED_SHUTDOWN(log))
2548		goto error_return_unlocked;
2549
2550	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
2551	if (free_bytes < need_bytes) {
2552		spin_lock(&log->l_grant_reserve_lock);
2553		if (list_empty(&tic->t_queue))
2554			list_add_tail(&tic->t_queue, &log->l_reserveq);
2555
2556		trace_xfs_log_grant_sleep2(log, tic);
2557
2558		if (XLOG_FORCED_SHUTDOWN(log))
2559			goto error_return;
2560
2561		xlog_grant_push_ail(log, need_bytes);
2562
2563		XFS_STATS_INC(xs_sleep_logspace);
2564		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
2565
2566		trace_xfs_log_grant_wake2(log, tic);
2567		goto redo;
2568	}
2569
2570	if (!list_empty(&tic->t_queue)) {
2571		spin_lock(&log->l_grant_reserve_lock);
2572		list_del_init(&tic->t_queue);
2573		spin_unlock(&log->l_grant_reserve_lock);
2574	}
2575
2576	/* we've got enough space */
2577	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
2578	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
2579	trace_xfs_log_grant_exit(log, tic);
2580	xlog_verify_grant_tail(log);
2581	return 0;
2582
2583error_return_unlocked:
2584	spin_lock(&log->l_grant_reserve_lock);
2585error_return:
2586	list_del_init(&tic->t_queue);
2587	spin_unlock(&log->l_grant_reserve_lock);
2588	trace_xfs_log_grant_error(log, tic);
2589
2590	/*
2591	 * If we are failing, make sure the ticket doesn't have any
2592	 * current reservations. We don't want to add this back when
2593	 * the ticket/transaction gets cancelled.
2594	 */
2595	tic->t_curr_res = 0;
2596	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2597	return XFS_ERROR(EIO);
2598}	/* xlog_grant_log_space */
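
/*
 * The fast path above relies on a standard double-checked queueing
 * pattern: peek at the wait list with list_empty_careful() without the
 * lock, and only take the spinlock when we actually have to queue.  A
 * minimal sketch of the same pattern (illustrative, made-up list name):
 *
 *	if (!list_empty_careful(&waitq)) {
 *		spin_lock(&lock);
 *		if (!list_empty(&waitq))	(recheck under the lock)
 *			list_add_tail(&tic->t_queue, &waitq);
 *		spin_unlock(&lock);
 *	}
 */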
2599
2600
2601/*
2602 * Replenish the byte reservation required by moving the grant write head.
2603 *
2604 * Similar to xlog_grant_log_space, the function is structured to have a lock
2605 * free fast path.
2606 */
2607STATIC int
2608xlog_regrant_write_log_space(xlog_t	   *log,
2609			     xlog_ticket_t *tic)
2610{
2611	int		free_bytes, need_bytes;
2612
2613	tic->t_curr_res = tic->t_unit_res;
2614	xlog_tic_reset_res(tic);
2615
2616	if (tic->t_cnt > 0)
2617		return 0;
2618
2619#ifdef DEBUG
2620	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2621		panic("regrant Recovery problem");
2622#endif
2623
2624	trace_xfs_log_regrant_write_enter(log, tic);
2625	if (XLOG_FORCED_SHUTDOWN(log))
2626		goto error_return_unlocked;
2627
2628	/* If there are other waiters on the queue then give them a
2629	 * chance at logspace before us.  Wake up the first waiters;
2630	 * if we do not wake up all the waiters, then go to sleep waiting
2631	 * for more free space; otherwise try to get some space for
2632	 * this transaction.
2633	 */
2634	need_bytes = tic->t_unit_res;
2635	if (!list_empty_careful(&log->l_writeq)) {
2636		struct xlog_ticket *ntic;
2637
2638		spin_lock(&log->l_grant_write_lock);
2639		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
2640		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
2641			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
2642
2643			if (free_bytes < ntic->t_unit_res)
2644				break;
2645			free_bytes -= ntic->t_unit_res;
2646			wake_up(&ntic->t_wait);
2647		}
2648
2649		if (ntic != list_first_entry(&log->l_writeq,
2650						struct xlog_ticket, t_queue)) {
2651			if (list_empty(&tic->t_queue))
2652				list_add_tail(&tic->t_queue, &log->l_writeq);
2653			trace_xfs_log_regrant_write_sleep1(log, tic);
2654
2655			xlog_grant_push_ail(log, need_bytes);
2656
2657			XFS_STATS_INC(xs_sleep_logspace);
2658			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
2659			trace_xfs_log_regrant_write_wake1(log, tic);
2660		} else
2661			spin_unlock(&log->l_grant_write_lock);
2662	}
2663
2664redo:
2665	if (XLOG_FORCED_SHUTDOWN(log))
2666		goto error_return_unlocked;
2667
2668	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
2669	if (free_bytes < need_bytes) {
2670		spin_lock(&log->l_grant_write_lock);
2671		if (list_empty(&tic->t_queue))
2672			list_add_tail(&tic->t_queue, &log->l_writeq);
2673
2674		if (XLOG_FORCED_SHUTDOWN(log))
2675			goto error_return;
2676
2677		xlog_grant_push_ail(log, need_bytes);
2678
2679		XFS_STATS_INC(xs_sleep_logspace);
2680		trace_xfs_log_regrant_write_sleep2(log, tic);
2681		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
2682
2683		trace_xfs_log_regrant_write_wake2(log, tic);
2684		goto redo;
2685	}
2686
2687	if (!list_empty(&tic->t_queue)) {
2688		spin_lock(&log->l_grant_write_lock);
2689		list_del_init(&tic->t_queue);
2690		spin_unlock(&log->l_grant_write_lock);
2691	}
2692
2693	/* we've got enough space */
2694	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
2695	trace_xfs_log_regrant_write_exit(log, tic);
2696	xlog_verify_grant_tail(log);
2697	return 0;
2698
2699
2700 error_return_unlocked:
2701	spin_lock(&log->l_grant_write_lock);
2702 error_return:
2703	list_del_init(&tic->t_queue);
2704	spin_unlock(&log->l_grant_write_lock);
2705	trace_xfs_log_regrant_write_error(log, tic);
2706
2707	/*
2708	 * If we are failing, make sure the ticket doesn't have any
2709	 * current reservations. We don't want to add this back when
2710	 * the ticket/transaction gets cancelled.
2711	 */
2712	tic->t_curr_res = 0;
2713	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2714	return XFS_ERROR(EIO);
2715}	/* xlog_regrant_write_log_space */
2716
2717
2718/* The first cnt-1 times through here we don't need to
2719 * move the grant write head because the permanent
2720 * reservation has reserved cnt times the unit amount.
2721 * Release part of current permanent unit reservation and
2722 * reset the current reservation to be one unit's worth.  Also
2723 * move the grant reservation head forward.
2724 */
2725STATIC void
2726xlog_regrant_reserve_log_space(xlog_t	     *log,
2727			       xlog_ticket_t *ticket)
2728{
2729	trace_xfs_log_regrant_reserve_enter(log, ticket);
2730
2731	if (ticket->t_cnt > 0)
2732		ticket->t_cnt--;
2733
2734	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
2735					ticket->t_curr_res);
2736	xlog_grant_sub_space(log, &log->l_grant_write_head,
2737					ticket->t_curr_res);
2738	ticket->t_curr_res = ticket->t_unit_res;
2739	xlog_tic_reset_res(ticket);
2740
2741	trace_xfs_log_regrant_reserve_sub(log, ticket);
2742
2743	/* just return if we still have some of the pre-reserved space */
2744	if (ticket->t_cnt > 0)
2745		return;
2746
2747	xlog_grant_add_space(log, &log->l_grant_reserve_head,
2748					ticket->t_unit_res);
2749
2750	trace_xfs_log_regrant_reserve_exit(log, ticket);
2751
2752	ticket->t_curr_res = ticket->t_unit_res;
2753	xlog_tic_reset_res(ticket);
2754}	/* xlog_regrant_reserve_log_space */
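
/*
 * Worked example (illustrative): a permanent ticket with t_ocnt == 3 and
 * t_unit_res == 1000 bytes commits three times, each time with 400 bytes
 * of its current reservation left unused:
 *
 *	commit 1: t_cnt 3 -> 2, both heads -= 400 (unused space returned),
 *		  t_curr_res refilled to 1000 from the pre-reserved units
 *	commit 2: t_cnt 2 -> 1, as above
 *	commit 3: t_cnt 1 -> 0, heads -= 400, then reserve head += 1000
 *		  to re-reserve a fresh unit
 *
 * Only when t_cnt reaches zero does the reserve grant head move forward
 * again.
 */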
2755
2756
2757/*
2758 * Give back the space left from a reservation.
2759 *
2760 * All the information we need to make a correct determination of space left
2761 * is present.  For non-permanent reservations, things are quite easy.  The
2762 * count should have been decremented to zero.  We only need to deal with the
2763 * space remaining in the current reservation part of the ticket.  If the
2764 * ticket contains a permanent reservation, there may be left over space which
2765 * needs to be released.  A count of N means that N-1 refills of the current
2766 * reservation can be done before we need to ask for more space.  The first
2767 * one goes to fill up the first current reservation.  Once we run out of
2768 * space, the count will stay at zero and the only space remaining will be
2769 * in the current reservation field.
2770 */
2771STATIC void
2772xlog_ungrant_log_space(xlog_t	     *log,
2773		       xlog_ticket_t *ticket)
2774{
2775	int	bytes;
2776
2777	if (ticket->t_cnt > 0)
2778		ticket->t_cnt--;
2779
2780	trace_xfs_log_ungrant_enter(log, ticket);
2781	trace_xfs_log_ungrant_sub(log, ticket);
2782
2783	/*
2784	 * If this is a permanent reservation ticket, we may be able to free
2785	 * up more space based on the remaining count.
2786	 */
2787	bytes = ticket->t_curr_res;
2788	if (ticket->t_cnt > 0) {
2789		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2790		bytes += ticket->t_unit_res*ticket->t_cnt;
2791	}
2792
2793	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
2794	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
2795
2796	trace_xfs_log_ungrant_exit(log, ticket);
2797
2798	xfs_log_move_tail(log->l_mp, 1);
2799}	/* xlog_ungrant_log_space */
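
/*
 * Worked example (illustrative): ungranting a permanent ticket with
 * t_cnt == 2, t_unit_res == 1000 and t_curr_res == 400:
 *
 *	t_cnt 2 -> 1
 *	bytes = 400 + 1 * 1000 = 1400
 *
 * so 1400 bytes go back to both grant heads: the unused part of the
 * current reservation plus one whole untouched refill.
 */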
2800
2801
2802/*
2803 * Flush iclog to disk if this is the last reference to the given iclog and
2804 * the WANT_SYNC bit is set.
2805 *
2806 * When this function is entered, the iclog is not necessarily in the
2807 * WANT_SYNC state.  It may be sitting around waiting to get filled.
2810 */
2811STATIC int
2812xlog_state_release_iclog(
2813	xlog_t		*log,
2814	xlog_in_core_t	*iclog)
2815{
2816	int		sync = 0;	/* do we sync? */
2817
2818	if (iclog->ic_state & XLOG_STATE_IOERROR)
2819		return XFS_ERROR(EIO);
2820
2821	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
2822	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
2823		return 0;
2824
2825	if (iclog->ic_state & XLOG_STATE_IOERROR) {
2826		spin_unlock(&log->l_icloglock);
2827		return XFS_ERROR(EIO);
2828	}
2829	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
2830	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
2831
2832	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
2833		/* update tail before writing to iclog */
2834		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
2835		sync++;
2836		iclog->ic_state = XLOG_STATE_SYNCING;
2837		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
2838		xlog_verify_tail_lsn(log, iclog, tail_lsn);
2839		/* cycle incremented when incrementing curr_block */
2840	}
2841	spin_unlock(&log->l_icloglock);
2842
2843	/*
2844	 * We let the log lock go, so it's possible that we hit a log I/O
2845	 * error or some other SHUTDOWN condition that marks the iclog
2846	 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
2847	 * this iclog has consistent data, so we ignore IOERROR
2848	 * flags after this point.
2849	 */
2850	if (sync)
2851		return xlog_sync(log, iclog);
2852	return 0;
2853}	/* xlog_state_release_iclog */
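
/*
 * Note on the refcount drop above: atomic_dec_and_lock() only takes the
 * spinlock when the count actually reaches zero, which keeps the common
 * path lock free.  A roughly equivalent open-coded sketch (illustrative):
 *
 *	if (atomic_add_unless(&iclog->ic_refcnt, -1, 1))
 *		return 0;		(not the last reference)
 *	spin_lock(&log->l_icloglock);
 *	if (!atomic_dec_and_test(&iclog->ic_refcnt)) {
 *		spin_unlock(&log->l_icloglock);
 *		return 0;
 *	}
 *	(last reference: count is zero and we hold l_icloglock)
 */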
2854
2855
2856/*
2857 * This routine will mark the current iclog in the ring as WANT_SYNC
2858 * and move the current iclog pointer to the next iclog in the ring.
2859 * When this routine is called from xlog_state_get_iclog_space(), the
2860 * exact size of the iclog has not yet been determined.  All we know is
2861 * that we have run out of space in this log record.
2862 */
2863STATIC void
2864xlog_state_switch_iclogs(xlog_t		*log,
2865			 xlog_in_core_t *iclog,
2866			 int		eventual_size)
2867{
2868	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2869	if (!eventual_size)
2870		eventual_size = iclog->ic_offset;
2871	iclog->ic_state = XLOG_STATE_WANT_SYNC;
2872	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2873	log->l_prev_block = log->l_curr_block;
2874	log->l_prev_cycle = log->l_curr_cycle;
2875
2876	/* roll log?: ic_offset changed later */
2877	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2878
2879	/* Round up to next log-sunit */
2880	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
2881	    log->l_mp->m_sb.sb_logsunit > 1) {
2882		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
2883		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2884	}
2885
2886	if (log->l_curr_block >= log->l_logBBsize) {
2887		log->l_curr_cycle++;
2888		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2889			log->l_curr_cycle++;
2890		log->l_curr_block -= log->l_logBBsize;
2891		ASSERT(log->l_curr_block >= 0);
2892	}
2893	ASSERT(iclog == log->l_iclog);
2894	log->l_iclog = iclog->ic_next;
2895}	/* xlog_state_switch_iclogs */
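
/*
 * Worked example of the block accounting above (illustrative numbers,
 * assuming a 32k iclog payload, a single 512 byte header block and a 4k
 * log stripe unit):
 *
 *	l_curr_block = 100 + BTOBB(32768) + BTOBB(512)
 *		     = 100 + 64 + 1 = 165
 *	l_curr_block = roundup(165, BTOBB(4096))
 *		     = roundup(165, 8) = 168
 */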
2896
2897/*
2898 * Write out all data in the in-core log as of this exact moment in time.
2899 *
2900 * Data may be written to the in-core log during this call.  However,
2901 * we don't guarantee this data will be written out.  A change from past
2902 * implementation means this routine will *not* write out zero length LRs.
2903 *
2904 * Basically, we try and perform an intelligent scan of the in-core logs.
2905 * If we determine there is no flushable data, we just return.  There is no
2906 * flushable data if:
2907 *
2908 *	1. the current iclog is active and has no data; the previous iclog
2909 *		is in the active or dirty state.
2910 *	2. the current iclog is dirty, and the previous iclog is in the
2911 *		active or dirty state.
2912 *
2913 * We may sleep if:
2914 *
2915 *	1. the current iclog is not in the active nor dirty state.
2916 *	2. the current iclog is dirty, and the previous iclog is not in the
2917 *		active nor dirty state.
2918 *	3. the current iclog is active, and there is another thread writing
2919 *		to this particular iclog.
2920 *	4. a) the current iclog is active and has no other writers
2921 *	   b) when we return from flushing out this iclog, it is still
2922 *		not in the active nor dirty state.
2923 */
2924int
2925_xfs_log_force(
2926	struct xfs_mount	*mp,
2927	uint			flags,
2928	int			*log_flushed)
2929{
2930	struct log		*log = mp->m_log;
2931	struct xlog_in_core	*iclog;
2932	xfs_lsn_t		lsn;
2933
2934	XFS_STATS_INC(xs_log_force);
2935
2936	if (log->l_cilp)
2937		xlog_cil_force(log);
2938
2939	spin_lock(&log->l_icloglock);
2940
2941	iclog = log->l_iclog;
2942	if (iclog->ic_state & XLOG_STATE_IOERROR) {
2943		spin_unlock(&log->l_icloglock);
2944		return XFS_ERROR(EIO);
2945	}
2946
2947	/* If the head iclog is neither active nor dirty, we just attach
2948	 * ourselves to the head and go to sleep.
2949	 */
2950	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2951	    iclog->ic_state == XLOG_STATE_DIRTY) {
2952		/*
2953		 * If the head is dirty or (active and empty), then
2954		 * we need to look at the previous iclog.  If the previous
2955		 * iclog is active or dirty we are done.  There is nothing
2956		 * to sync out.  Otherwise, we attach ourselves to the
2957		 * previous iclog and go to sleep.
2958		 */
2959		if (iclog->ic_state == XLOG_STATE_DIRTY ||
2960		    (atomic_read(&iclog->ic_refcnt) == 0
2961		     && iclog->ic_offset == 0)) {
2962			iclog = iclog->ic_prev;
2963			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2964			    iclog->ic_state == XLOG_STATE_DIRTY)
2965				goto no_sleep;
2966			else
2967				goto maybe_sleep;
2968		} else {
2969			if (atomic_read(&iclog->ic_refcnt) == 0) {
2970				/* We are the only one with access to this
2971				 * iclog.  Flush it out now.  There should
2972				 * be a roundoff of zero to show that someone
2973				 * has already taken care of the roundoff from
2974				 * the previous sync.
2975				 */
2976				atomic_inc(&iclog->ic_refcnt);
2977				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2978				xlog_state_switch_iclogs(log, iclog, 0);
2979				spin_unlock(&log->l_icloglock);
2980
2981				if (xlog_state_release_iclog(log, iclog))
2982					return XFS_ERROR(EIO);
2983
2984				if (log_flushed)
2985					*log_flushed = 1;
2986				spin_lock(&log->l_icloglock);
2987				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
2988				    iclog->ic_state != XLOG_STATE_DIRTY)
2989					goto maybe_sleep;
2990				else
2991					goto no_sleep;
2992			} else {
2993				/* Someone else is writing to this iclog.
2994				 * Use its call to flush out the data.  However,
2995				 * the other thread may not force out this LR,
2996				 * so we mark it WANT_SYNC.
2997				 */
2998				xlog_state_switch_iclogs(log, iclog, 0);
2999				goto maybe_sleep;
3000			}
3001		}
3002	}
3003
3004	/* By the time we come around again, the iclog could've been filled
3005	 * which would give it another lsn.  If we have a new lsn, just
3006	 * return because the relevant data has been flushed.
3007	 */
3008maybe_sleep:
3009	if (flags & XFS_LOG_SYNC) {
3010		/*
3011		 * We must check if we're shutting down here, before
3012		 * we wait, while we're holding the l_icloglock.
3013		 * Then we check again after waking up, in case our
3014		 * sleep was disturbed by bad news.
3015		 */
3016		if (iclog->ic_state & XLOG_STATE_IOERROR) {
3017			spin_unlock(&log->l_icloglock);
3018			return XFS_ERROR(EIO);
3019		}
3020		XFS_STATS_INC(xs_log_force_sleep);
3021		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3022		/*
3023		 * No need to grab the log lock here since we're
3024		 * only deciding whether or not to return EIO
3025		 * and the memory read should be atomic.
3026		 */
3027		if (iclog->ic_state & XLOG_STATE_IOERROR)
3028			return XFS_ERROR(EIO);
3029		if (log_flushed)
3030			*log_flushed = 1;
3031	} else {
3032
3033no_sleep:
3034		spin_unlock(&log->l_icloglock);
3035	}
3036	return 0;
3037}
3038
3039/*
3040 * Wrapper for _xfs_log_force(), to be used when caller doesn't care
3041 * about errors or whether the log was flushed or not. This is the normal
3042 * interface to use when trying to unpin items or move the log forward.
3043 */
3044void
3045xfs_log_force(
3046	xfs_mount_t	*mp,
3047	uint		flags)
3048{
3049	int	error;
3050
3051	error = _xfs_log_force(mp, flags, NULL);
3052	if (error)
3053		xfs_warn(mp, "%s: error %d returned.", __func__, error);
3054}
3055
3056/*
3057 * Force the in-core log to disk for a specific LSN.
3058 *
3059 * Find in-core log with lsn.
3060 *	If it is in the DIRTY state, just return.
3061 *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3062 *		state and go to sleep or return.
3063 *	If it is in any other state, go to sleep or return.
3064 *
3065 * Synchronous forces are implemented with a signal variable. All callers
3066 * to force a given lsn to disk will wait on the sv attached to the
3067 * specific in-core log.  When the given in-core log finally completes its
3068 * write to disk, that thread will wake up all threads waiting on the
3069 * sv.
3070 */
3071int
3072_xfs_log_force_lsn(
3073	struct xfs_mount	*mp,
3074	xfs_lsn_t		lsn,
3075	uint			flags,
3076	int			*log_flushed)
3077{
3078	struct log		*log = mp->m_log;
3079	struct xlog_in_core	*iclog;
3080	int			already_slept = 0;
3081
3082	ASSERT(lsn != 0);
3083
3084	XFS_STATS_INC(xs_log_force);
3085
3086	if (log->l_cilp) {
3087		lsn = xlog_cil_force_lsn(log, lsn);
3088		if (lsn == NULLCOMMITLSN)
3089			return 0;
3090	}
3091
3092try_again:
3093	spin_lock(&log->l_icloglock);
3094	iclog = log->l_iclog;
3095	if (iclog->ic_state & XLOG_STATE_IOERROR) {
3096		spin_unlock(&log->l_icloglock);
3097		return XFS_ERROR(EIO);
3098	}
3099
3100	do {
3101		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3102			iclog = iclog->ic_next;
3103			continue;
3104		}
3105
3106		if (iclog->ic_state == XLOG_STATE_DIRTY) {
3107			spin_unlock(&log->l_icloglock);
3108			return 0;
3109		}
3110
3111		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3112			/*
3113			 * We sleep here if we haven't already slept (e.g.
3114			 * this is the first time we've looked at the correct
3115			 * iclog buf) and the buffer before us is going to
3116			 * be sync'ed. The reason for this is that if we
3117			 * are doing sync transactions here, by waiting for
3118			 * the previous I/O to complete, we can allow a few
3119			 * more transactions into this iclog before we close
3120			 * it down.
3121			 *
3122			 * Otherwise, we mark the buffer WANT_SYNC, and bump
3123			 * up the refcnt so we can release the log (which
3124			 * drops the ref count).  The state switch keeps new
3125			 * transaction commits from using this buffer.  When
3126			 * the current commits finish writing into the buffer,
3127			 * the refcount will drop to zero and the buffer will
3128			 * go out then.
3129			 */
3130			if (!already_slept &&
3131			    (iclog->ic_prev->ic_state &
3132			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3133				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3134
3135				XFS_STATS_INC(xs_log_force_sleep);
3136
3137				xlog_wait(&iclog->ic_prev->ic_write_wait,
3138							&log->l_icloglock);
3139				if (log_flushed)
3140					*log_flushed = 1;
3141				already_slept = 1;
3142				goto try_again;
3143			}
3144			atomic_inc(&iclog->ic_refcnt);
3145			xlog_state_switch_iclogs(log, iclog, 0);
3146			spin_unlock(&log->l_icloglock);
3147			if (xlog_state_release_iclog(log, iclog))
3148				return XFS_ERROR(EIO);
3149			if (log_flushed)
3150				*log_flushed = 1;
3151			spin_lock(&log->l_icloglock);
3152		}
3153
3154		if ((flags & XFS_LOG_SYNC) && /* sleep */
3155		    !(iclog->ic_state &
3156		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3157			/*
3158			 * Don't wait on completion if we know that we've
3159			 * gotten a log write error.
3160			 */
3161			if (iclog->ic_state & XLOG_STATE_IOERROR) {
3162				spin_unlock(&log->l_icloglock);
3163				return XFS_ERROR(EIO);
3164			}
3165			XFS_STATS_INC(xs_log_force_sleep);
3166			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3167			/*
3168			 * No need to grab the log lock here since we're
3169			 * only deciding whether or not to return EIO
3170			 * and the memory read should be atomic.
3171			 */
3172			if (iclog->ic_state & XLOG_STATE_IOERROR)
3173				return XFS_ERROR(EIO);
3174
3175			if (log_flushed)
3176				*log_flushed = 1;
3177		} else {		/* just return */
3178			spin_unlock(&log->l_icloglock);
3179		}
3180
3181		return 0;
3182	} while (iclog != log->l_iclog);
3183
3184	spin_unlock(&log->l_icloglock);
3185	return 0;
3186}
3187
3188/*
3189 * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
3190 * about errors or whether the log was flushed or not. This is the normal
3191 * interface to use when trying to unpin items or move the log forward.
3192 */
3193void
3194xfs_log_force_lsn(
3195	xfs_mount_t	*mp,
3196	xfs_lsn_t	lsn,
3197	uint		flags)
3198{
3199	int	error;
3200
3201	error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
3202	if (error)
3203		xfs_warn(mp, "%s: error %d returned.", __func__, error);
3204}
3205
3206/*
3207 * Called when we want to mark the current iclog as being ready to sync to
3208 * disk.
3209 */
3210STATIC void
3211xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3212{
3213	assert_spin_locked(&log->l_icloglock);
3214
3215	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3216		xlog_state_switch_iclogs(log, iclog, 0);
3217	} else {
3218		ASSERT(iclog->ic_state &
3219			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3220	}
3221}
3222
3223
3224/*****************************************************************************
3225 *
3226 *		TICKET functions
3227 *
3228 *****************************************************************************
3229 */
3230
3231/*
3232 * Free a used ticket when its refcount falls to zero.
3233 */
3234void
3235xfs_log_ticket_put(
3236	xlog_ticket_t	*ticket)
3237{
3238	ASSERT(atomic_read(&ticket->t_ref) > 0);
3239	if (atomic_dec_and_test(&ticket->t_ref))
3240		kmem_zone_free(xfs_log_ticket_zone, ticket);
3241}
3242
3243xlog_ticket_t *
3244xfs_log_ticket_get(
3245	xlog_ticket_t	*ticket)
3246{
3247	ASSERT(atomic_read(&ticket->t_ref) > 0);
3248	atomic_inc(&ticket->t_ref);
3249	return ticket;
3250}
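
/*
 * Usage sketch (illustrative): a caller that hands a ticket to another
 * context takes its own reference first and drops it when done, so the
 * ticket cannot be freed underneath either user:
 *
 *	tic = xfs_log_ticket_get(tic);
 *	... pass tic to the other context ...
 *	xfs_log_ticket_put(tic);	(may free on the final put)
 */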
3251
3252/*
3253 * Allocate and initialise a new log ticket.
3254 */
3255xlog_ticket_t *
3256xlog_ticket_alloc(
3257	struct log	*log,
3258	int		unit_bytes,
3259	int		cnt,
3260	char		client,
3261	uint		xflags,
3262	int		alloc_flags)
3263{
3264	struct xlog_ticket *tic;
3265	uint		num_headers;
3266	int		iclog_space;
3267
3268	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3269	if (!tic)
3270		return NULL;
3271
3272	/*
3273	 * Permanent reservations have up to 'cnt'-1 active log operations
3274	 * in the log.  A unit in this case is the amount of space for one
3275	 * of these log operations.  Normal reservations have a cnt of 1
3276	 * and their unit amount is the total amount of space required.
3277	 *
3278	 * The following lines of code account for non-transaction data
3279	 * which occupy space in the on-disk log.
3280	 *
3281	 * Normal form of a transaction is:
3282	 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3283	 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3284	 *
3285	 * We need to account for all the leadup data and trailer data
3286	 * around the transaction data.
3287	 * And then we need to account for the worst case in terms of using
3288	 * more space.
3289	 * The worst case will happen if:
3290	 * - the placement of the transaction happens to be such that the
3291	 *   roundoff is at its maximum
3292	 * - the transaction data is synced before the commit record is synced
3293	 *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3294	 *   Therefore the commit record is in its own Log Record.
3295	 *   This can happen as the commit record is called with its
3296	 *   own region to xlog_write().
3297	 *   This then means that in the worst case, roundoff can happen for
3298	 *   the commit-rec as well.
3299	 *   The commit-rec is smaller than padding in this scenario and so it is
3300	 *   not added separately.
3301	 */
3302
3303	/* for trans header */
3304	unit_bytes += sizeof(xlog_op_header_t);
3305	unit_bytes += sizeof(xfs_trans_header_t);
3306
3307	/* for start-rec */
3308	unit_bytes += sizeof(xlog_op_header_t);
3309
3310	/*
3311	 * for LR headers - the space for data in an iclog is the size minus
3312	 * the space used for the headers. If we use the iclog size, then we
3313	 * undercalculate the number of headers required.
3314	 *
3315	 * Furthermore - the addition of op headers for split-recs might
3316	 * increase the space required enough to require more log and op
3317	 * headers, so take that into account too.
3318	 *
3319	 * IMPORTANT: This reservation makes the assumption that if this
3320	 * transaction is the first in an iclog and hence has the LR headers
3321	 * accounted to it, then the remaining space in the iclog is
3322	 * exclusively for this transaction.  i.e. if the transaction is larger
3323	 * than the iclog, it will be the only thing in that iclog.
3324	 * Fundamentally, this means we must pass the entire log vector to
3325	 * xlog_write to guarantee this.
3326	 */
3327	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3328	num_headers = howmany(unit_bytes, iclog_space);
3329
3330	/* for split-recs - ophdrs added when data split over LRs */
3331	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3332
3333	/* add extra header reservations if we overrun */
3334	while (!num_headers ||
3335	       howmany(unit_bytes, iclog_space) > num_headers) {
3336		unit_bytes += sizeof(xlog_op_header_t);
3337		num_headers++;
3338	}
3339	unit_bytes += log->l_iclog_hsize * num_headers;
3340
3341	/* for commit-rec LR header - note: padding will subsume the ophdr */
3342	unit_bytes += log->l_iclog_hsize;
3343
3344	/* for roundoff padding for transaction data and one for commit record */
3345	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3346	    log->l_mp->m_sb.sb_logsunit > 1) {
3347		/* log su roundoff */
3348		unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3349	} else {
3350		/* BB roundoff */
3351		unit_bytes += 2*BBSIZE;
3352	}
3353
3354	atomic_set(&tic->t_ref, 1);
3355	INIT_LIST_HEAD(&tic->t_queue);
3356	tic->t_unit_res		= unit_bytes;
3357	tic->t_curr_res		= unit_bytes;
3358	tic->t_cnt		= cnt;
3359	tic->t_ocnt		= cnt;
3360	tic->t_tid		= random32();
3361	tic->t_clientid		= client;
3362	tic->t_flags		= XLOG_TIC_INITED;
3363	tic->t_trans_type	= 0;
3364	if (xflags & XFS_LOG_PERM_RESERV)
3365		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3366	init_waitqueue_head(&tic->t_wait);
3367
3368	xlog_tic_reset_res(tic);
3369
3370	return tic;
3371}
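
/*
 * Worked example of the reservation inflation above (illustrative
 * numbers; the 12 and 16 byte figures assume the usual sizes of
 * xlog_op_header_t and xfs_trans_header_t).  For unit_bytes == 4000
 * with 32k iclogs and no v2 stripe unit:
 *
 *	4000 + 12 + 16 + 12		trans ophdr + trans hdr + start rec
 *	num_headers = howmany(4040, iclog_space) = 1
 *	+ 1 * 12			split-rec ophdr
 *	+ 1 * l_iclog_hsize		LR header
 *	+ l_iclog_hsize			commit-rec LR header
 *	+ 2 * BBSIZE			worst case basic block roundoff
 */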
3372
3373
3374/******************************************************************************
3375 *
3376 *		Log debug routines
3377 *
3378 ******************************************************************************
3379 */
3380#if defined(DEBUG)
/*
 * Make sure that the destination ptr is within the valid data region of
 * one of the iclogs.  This uses backup pointers stored in a different
 * part of the log in case we trash the log structure.
 */
STATIC void
xlog_verify_dest_ptr(
	struct xlog	*log,
	char		*ptr)
{
	int i;
	int good_ptr = 0;

	for (i = 0; i < log->l_iclog_bufs; i++) {
		if (ptr >= log->l_iclog_bak[i] &&
		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
			good_ptr++;
	}

	if (!good_ptr)
		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
}

/*
 * Check to make sure the grant write head didn't just overlap the tail.  If
 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
 * the cycles differ by exactly one and check the byte count.
 *
 * This check is run unlocked, so it can give false positives. Rather than
 * assert on failures, use a warn-once flag and a panic tag to allow the admin
 * to determine whether they want to panic the machine when such an error
 * occurs. For debug kernels this will have the same effect as using an assert
 * but, unlike an assert, it can be turned off at runtime.
 */
STATIC void
xlog_verify_grant_tail(
	struct xlog	*log)
{
	int		tail_cycle, tail_blocks;
	int		cycle, space;

	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
	if (tail_cycle != cycle) {
		if (cycle - 1 != tail_cycle &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: cycle - 1 != tail_cycle", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}

		if (space > BBTOB(tail_blocks) &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: space > BBTOB(tail_blocks)", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}
	}
}
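/*
 * Illustrative scenario (not from the original source): a write head
 * that has wrapped into cycle 42 while the tail is still in cycle 41 is
 * legitimate only while the head's byte offset stays at or below the
 * tail's offset; space > BBTOB(tail_blocks) would mean the head has run
 * over the un-freed tail, which is exactly what is flagged above.
 */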

/* check if it will fit */
STATIC void
xlog_verify_tail_lsn(xlog_t	    *log,
		     xlog_in_core_t *iclog,
		     xfs_lsn_t	    tail_lsn)
{
	int blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		blocks = log->l_logBBsize -
			 (log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	} else {
		ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);

		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	}
}	/* xlog_verify_tail_lsn */
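/*
 * Worked example (hypothetical numbers): on a 10000 BB log with
 * l_prev_block == 9000 and the tail at block 500 of the same cycle,
 * blocks = 10000 - (9000 - 500) = 1500 basic blocks remain before the
 * head would run into the tail; an iclog needing more than that
 * triggers the "ran out of log space" report above.
 */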

/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(xlog_t	 *log,
		  xlog_in_core_t *iclog,
		  int		 count,
		  boolean_t	 syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	xfs_caddr_t		ptr;
	xfs_caddr_t		base_ptr;
	__psint_t		field_offset;
	__uint8_t		clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++) {
		if (icptr == NULL)
			xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
		icptr = icptr->ic_next;
	}
	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	ptr = (xfs_caddr_t) &iclog->ic_header;
	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
	     ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	ptr = iclog->ic_datap;
	base_ptr = ptr;
	ophead = (xlog_op_header_t *)ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = (xlog_op_header_t *)ptr;

		/* clientid is only 1 byte */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op 0x%p offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((__psint_t)&ophead->oh_len -
				    (__psint_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}	/* xlog_verify_iclog */
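/*
 * Sketch of the idx/j/k arithmetic above (assuming 512 byte basic
 * blocks and a 32 KiB XLOG_HEADER_CYCLE_SIZE, i.e. 64 saved words per
 * header).  When syncing, the first word of every 512 byte block in the
 * iclog has been replaced by the cycle number, so a field sitting on a
 * block boundary must be read back from the saved copy instead:
 *
 *	idx = BTOBBT(offset);	which 512 byte block
 *	j = idx / 64;		which extended header holds the word
 *	k = idx % 64;		which saved word within that header
 *
 * idx < 64 means the word lives in the primary header's h_cycle_data[];
 * otherwise it is in extended header j.
 */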
#endif

/*
 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	xlog_t	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}
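/*
 * Usage sketch (mirroring the call sites in xfs_log_force_umount()
 * below): callers take l_icloglock around the transition and use the
 * return value to tell whether someone else already shut the log down:
 *
 *	spin_lock(&log->l_icloglock);
 *	retval = xlog_state_ioerror(log);
 *	spin_unlock(&log->l_icloglock);
 */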

/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	b. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up and told the bad news.
 *	c. nothing new gets queued up after (a) and (b) are done.
 *	d. if !logerror, flush the iclogs to disk, then seal them off
 *	   for business.
 *
 * Note: for delayed logging the !logerror case needs to flush the regions
 * held in memory out to the iclogs before flushing them to disk. This needs
 * to be done before the log is marked as shutdown, otherwise the flush to the
 * iclogs will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	xlog_ticket_t	*tic;
	xlog_t		*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			XFS_BUF_DONE(mp->m_sb_bp);
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}
	retval = 0;

	/*
	 * Flush the in memory commit item list before marking the log as
	 * being shut down. We need to do it in this order to ensure all the
	 * completed transactions are flushed to disk with the xfs_log_force()
	 * call below.
	 */
	if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
		xlog_cil_force(log);

	/*
	 * Mark the filesystem and the log as being in a shutdown state and
	 * wake everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		XFS_BUF_DONE(mp->m_sb_bp);

	/*
	 * This flag is sort of redundant because of the mount flag, but
	 * it's good to maintain the separation between the log and the rest
	 * of XFS.
	 */
	log->l_flags |= XLOG_IO_ERROR;

	/*
	 * If we hit a log error, we want to mark all the iclogs IOERROR
	 * while we're still holding the loglock.
	 */
	if (logerror)
		retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	spin_lock(&log->l_grant_reserve_lock);
	list_for_each_entry(tic, &log->l_reserveq, t_queue)
		wake_up(&tic->t_wait);
	spin_unlock(&log->l_grant_reserve_lock);

	spin_lock(&log->l_grant_write_lock);
	list_for_each_entry(tic, &log->l_writeq, t_queue)
		wake_up(&tic->t_wait);
	spin_unlock(&log->l_grant_write_lock);

	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
		ASSERT(!logerror);
		/*
		 * Force the incore logs to disk before shutting the
		 * log down completely.
		 */
		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);

		spin_lock(&log->l_icloglock);
		retval = xlog_state_ioerror(log);
		spin_unlock(&log->l_icloglock);
	}
	/*
	 * Wake up everybody waiting on xfs_log_force.
	 * Callback all log item committed functions as if the
	 * log writes were completed.
	 */
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}
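/*
 * Hedged usage sketch (xfs_force_shutdown() is the caller named in the
 * comment above; the exact flag plumbing lives outside this excerpt).
 * An I/O-error shutdown path conceptually does:
 *
 *	if (xfs_log_force_umount(mp, logerror))
 *		return;		somebody else already shut us down
 *
 * i.e. a non-zero return means the IOERROR transition had already
 * happened and the rest of the shutdown work can be skipped.
 */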

STATIC int
xlog_iclogs_empty(xlog_t *log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/*
		 * Endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}
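/*
 * Usage note (a sketch; the caller is assumed to be the log-covering
 * logic, e.g. xfs_log_need_covered()): the log may only be marked
 * "covered" once every iclog has been written out, so that decision is
 * gated on xlog_iclogs_empty(log) returning true.
 */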