Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_errortag.h"
14#include "xfs_error.h"
15#include "xfs_trans.h"
16#include "xfs_trans_priv.h"
17#include "xfs_log.h"
18#include "xfs_log_priv.h"
19#include "xfs_trace.h"
20#include "xfs_sysfs.h"
21#include "xfs_sb.h"
22#include "xfs_health.h"
23
24struct kmem_cache *xfs_log_ticket_cache;
25
26/* Local miscellaneous function prototypes */
27STATIC struct xlog *
28xlog_alloc_log(
29 struct xfs_mount *mp,
30 struct xfs_buftarg *log_target,
31 xfs_daddr_t blk_offset,
32 int num_bblks);
33STATIC int
34xlog_space_left(
35 struct xlog *log,
36 atomic64_t *head);
37STATIC void
38xlog_dealloc_log(
39 struct xlog *log);
40
41/* local state machine functions */
42STATIC void xlog_state_done_syncing(
43 struct xlog_in_core *iclog);
44STATIC void xlog_state_do_callback(
45 struct xlog *log);
46STATIC int
47xlog_state_get_iclog_space(
48 struct xlog *log,
49 int len,
50 struct xlog_in_core **iclog,
51 struct xlog_ticket *ticket,
52 int *logoffsetp);
53STATIC void
54xlog_grant_push_ail(
55 struct xlog *log,
56 int need_bytes);
57STATIC void
58xlog_sync(
59 struct xlog *log,
60 struct xlog_in_core *iclog,
61 struct xlog_ticket *ticket);
62#if defined(DEBUG)
63STATIC void
64xlog_verify_grant_tail(
65 struct xlog *log);
66STATIC void
67xlog_verify_iclog(
68 struct xlog *log,
69 struct xlog_in_core *iclog,
70 int count);
71STATIC void
72xlog_verify_tail_lsn(
73 struct xlog *log,
74 struct xlog_in_core *iclog);
75#else
76#define xlog_verify_grant_tail(a)
77#define xlog_verify_iclog(a,b,c)
78#define xlog_verify_tail_lsn(a,b)
79#endif
80
81STATIC int
82xlog_iclogs_empty(
83 struct xlog *log);
84
85static int
86xfs_log_cover(struct xfs_mount *);
87
88/*
89 * We need to make sure the buffer pointer returned is naturally aligned for the
90 * biggest basic data type we put into it. We have already accounted for this
91 * padding when sizing the buffer.
92 *
93 * However, this padding does not get written into the log, and hence we have to
94 * track the space used by the log vectors separately to prevent log space hangs
95 * due to inaccurate accounting (i.e. a leak) of the used log space through the
96 * CIL context ticket.
97 *
98 * We also add space for the xlog_op_header that describes this region in the
99 * log. This prepends the data region we return to the caller to copy their data
100 * into, so do all the static initialisation of the ophdr now. Because the ophdr
101 * is not 8 byte aligned, we have to be careful to ensure that we align the
102 * start of the buffer such that the region we return to the call is 8 byte
103 * aligned and packed against the tail of the ophdr.
104 */
105void *
106xlog_prepare_iovec(
107 struct xfs_log_vec *lv,
108 struct xfs_log_iovec **vecp,
109 uint type)
110{
111 struct xfs_log_iovec *vec = *vecp;
112 struct xlog_op_header *oph;
113 uint32_t len;
114 void *buf;
115
116 if (vec) {
117 ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
118 vec++;
119 } else {
120 vec = &lv->lv_iovecp[0];
121 }
122
123 len = lv->lv_buf_len + sizeof(struct xlog_op_header);
124 if (!IS_ALIGNED(len, sizeof(uint64_t))) {
125 lv->lv_buf_len = round_up(len, sizeof(uint64_t)) -
126 sizeof(struct xlog_op_header);
127 }
128
129 vec->i_type = type;
130 vec->i_addr = lv->lv_buf + lv->lv_buf_len;
131
132 oph = vec->i_addr;
133 oph->oh_clientid = XFS_TRANSACTION;
134 oph->oh_res2 = 0;
135 oph->oh_flags = 0;
136
137 buf = vec->i_addr + sizeof(struct xlog_op_header);
138 ASSERT(IS_ALIGNED((unsigned long)buf, sizeof(uint64_t)));
139
140 *vecp = vec;
141 return buf;
142}
143
144static void
145xlog_grant_sub_space(
146 struct xlog *log,
147 atomic64_t *head,
148 int bytes)
149{
150 int64_t head_val = atomic64_read(head);
151 int64_t new, old;
152
153 do {
154 int cycle, space;
155
156 xlog_crack_grant_head_val(head_val, &cycle, &space);
157
158 space -= bytes;
159 if (space < 0) {
160 space += log->l_logsize;
161 cycle--;
162 }
163
164 old = head_val;
165 new = xlog_assign_grant_head_val(cycle, space);
166 head_val = atomic64_cmpxchg(head, old, new);
167 } while (head_val != old);
168}
169
170static void
171xlog_grant_add_space(
172 struct xlog *log,
173 atomic64_t *head,
174 int bytes)
175{
176 int64_t head_val = atomic64_read(head);
177 int64_t new, old;
178
179 do {
180 int tmp;
181 int cycle, space;
182
183 xlog_crack_grant_head_val(head_val, &cycle, &space);
184
185 tmp = log->l_logsize - space;
186 if (tmp > bytes)
187 space += bytes;
188 else {
189 space = bytes - tmp;
190 cycle++;
191 }
192
193 old = head_val;
194 new = xlog_assign_grant_head_val(cycle, space);
195 head_val = atomic64_cmpxchg(head, old, new);
196 } while (head_val != old);
197}
198
199STATIC void
200xlog_grant_head_init(
201 struct xlog_grant_head *head)
202{
203 xlog_assign_grant_head(&head->grant, 1, 0);
204 INIT_LIST_HEAD(&head->waiters);
205 spin_lock_init(&head->lock);
206}
207
208STATIC void
209xlog_grant_head_wake_all(
210 struct xlog_grant_head *head)
211{
212 struct xlog_ticket *tic;
213
214 spin_lock(&head->lock);
215 list_for_each_entry(tic, &head->waiters, t_queue)
216 wake_up_process(tic->t_task);
217 spin_unlock(&head->lock);
218}
219
220static inline int
221xlog_ticket_reservation(
222 struct xlog *log,
223 struct xlog_grant_head *head,
224 struct xlog_ticket *tic)
225{
226 if (head == &log->l_write_head) {
227 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
228 return tic->t_unit_res;
229 }
230
231 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
232 return tic->t_unit_res * tic->t_cnt;
233
234 return tic->t_unit_res;
235}
236
237STATIC bool
238xlog_grant_head_wake(
239 struct xlog *log,
240 struct xlog_grant_head *head,
241 int *free_bytes)
242{
243 struct xlog_ticket *tic;
244 int need_bytes;
245 bool woken_task = false;
246
247 list_for_each_entry(tic, &head->waiters, t_queue) {
248
249 /*
250 * There is a chance that the size of the CIL checkpoints in
251 * progress at the last AIL push target calculation resulted in
252 * limiting the target to the log head (l_last_sync_lsn) at the
253 * time. This may not reflect where the log head is now as the
254 * CIL checkpoints may have completed.
255 *
256 * Hence when we are woken here, it may be that the head of the
257 * log that has moved rather than the tail. As the tail didn't
258 * move, there still won't be space available for the
259 * reservation we require. However, if the AIL has already
260 * pushed to the target defined by the old log head location, we
261 * will hang here waiting for something else to update the AIL
262 * push target.
263 *
264 * Therefore, if there isn't space to wake the first waiter on
265 * the grant head, we need to push the AIL again to ensure the
266 * target reflects both the current log tail and log head
267 * position before we wait for the tail to move again.
268 */
269
270 need_bytes = xlog_ticket_reservation(log, head, tic);
271 if (*free_bytes < need_bytes) {
272 if (!woken_task)
273 xlog_grant_push_ail(log, need_bytes);
274 return false;
275 }
276
277 *free_bytes -= need_bytes;
278 trace_xfs_log_grant_wake_up(log, tic);
279 wake_up_process(tic->t_task);
280 woken_task = true;
281 }
282
283 return true;
284}
285
286STATIC int
287xlog_grant_head_wait(
288 struct xlog *log,
289 struct xlog_grant_head *head,
290 struct xlog_ticket *tic,
291 int need_bytes) __releases(&head->lock)
292 __acquires(&head->lock)
293{
294 list_add_tail(&tic->t_queue, &head->waiters);
295
296 do {
297 if (xlog_is_shutdown(log))
298 goto shutdown;
299 xlog_grant_push_ail(log, need_bytes);
300
301 __set_current_state(TASK_UNINTERRUPTIBLE);
302 spin_unlock(&head->lock);
303
304 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
305
306 trace_xfs_log_grant_sleep(log, tic);
307 schedule();
308 trace_xfs_log_grant_wake(log, tic);
309
310 spin_lock(&head->lock);
311 if (xlog_is_shutdown(log))
312 goto shutdown;
313 } while (xlog_space_left(log, &head->grant) < need_bytes);
314
315 list_del_init(&tic->t_queue);
316 return 0;
317shutdown:
318 list_del_init(&tic->t_queue);
319 return -EIO;
320}
321
322/*
323 * Atomically get the log space required for a log ticket.
324 *
325 * Once a ticket gets put onto head->waiters, it will only return after the
326 * needed reservation is satisfied.
327 *
328 * This function is structured so that it has a lock free fast path. This is
329 * necessary because every new transaction reservation will come through this
330 * path. Hence any lock will be globally hot if we take it unconditionally on
331 * every pass.
332 *
333 * As tickets are only ever moved on and off head->waiters under head->lock, we
334 * only need to take that lock if we are going to add the ticket to the queue
335 * and sleep. We can avoid taking the lock if the ticket was never added to
336 * head->waiters because the t_queue list head will be empty and we hold the
337 * only reference to it so it can safely be checked unlocked.
338 */
339STATIC int
340xlog_grant_head_check(
341 struct xlog *log,
342 struct xlog_grant_head *head,
343 struct xlog_ticket *tic,
344 int *need_bytes)
345{
346 int free_bytes;
347 int error = 0;
348
349 ASSERT(!xlog_in_recovery(log));
350
351 /*
352 * If there are other waiters on the queue then give them a chance at
353 * logspace before us. Wake up the first waiters, if we do not wake
354 * up all the waiters then go to sleep waiting for more free space,
355 * otherwise try to get some space for this transaction.
356 */
357 *need_bytes = xlog_ticket_reservation(log, head, tic);
358 free_bytes = xlog_space_left(log, &head->grant);
359 if (!list_empty_careful(&head->waiters)) {
360 spin_lock(&head->lock);
361 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
362 free_bytes < *need_bytes) {
363 error = xlog_grant_head_wait(log, head, tic,
364 *need_bytes);
365 }
366 spin_unlock(&head->lock);
367 } else if (free_bytes < *need_bytes) {
368 spin_lock(&head->lock);
369 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
370 spin_unlock(&head->lock);
371 }
372
373 return error;
374}
375
376bool
377xfs_log_writable(
378 struct xfs_mount *mp)
379{
380 /*
381 * Do not write to the log on norecovery mounts, if the data or log
382 * devices are read-only, or if the filesystem is shutdown. Read-only
383 * mounts allow internal writes for log recovery and unmount purposes,
384 * so don't restrict that case.
385 */
386 if (xfs_has_norecovery(mp))
387 return false;
388 if (xfs_readonly_buftarg(mp->m_ddev_targp))
389 return false;
390 if (xfs_readonly_buftarg(mp->m_log->l_targ))
391 return false;
392 if (xlog_is_shutdown(mp->m_log))
393 return false;
394 return true;
395}
396
397/*
398 * Replenish the byte reservation required by moving the grant write head.
399 */
400int
401xfs_log_regrant(
402 struct xfs_mount *mp,
403 struct xlog_ticket *tic)
404{
405 struct xlog *log = mp->m_log;
406 int need_bytes;
407 int error = 0;
408
409 if (xlog_is_shutdown(log))
410 return -EIO;
411
412 XFS_STATS_INC(mp, xs_try_logspace);
413
414 /*
415 * This is a new transaction on the ticket, so we need to change the
416 * transaction ID so that the next transaction has a different TID in
417 * the log. Just add one to the existing tid so that we can see chains
418 * of rolling transactions in the log easily.
419 */
420 tic->t_tid++;
421
422 xlog_grant_push_ail(log, tic->t_unit_res);
423
424 tic->t_curr_res = tic->t_unit_res;
425 if (tic->t_cnt > 0)
426 return 0;
427
428 trace_xfs_log_regrant(log, tic);
429
430 error = xlog_grant_head_check(log, &log->l_write_head, tic,
431 &need_bytes);
432 if (error)
433 goto out_error;
434
435 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
436 trace_xfs_log_regrant_exit(log, tic);
437 xlog_verify_grant_tail(log);
438 return 0;
439
440out_error:
441 /*
442 * If we are failing, make sure the ticket doesn't have any current
443 * reservations. We don't want to add this back when the ticket/
444 * transaction gets cancelled.
445 */
446 tic->t_curr_res = 0;
447 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
448 return error;
449}
450
451/*
452 * Reserve log space and return a ticket corresponding to the reservation.
453 *
454 * Each reservation is going to reserve extra space for a log record header.
455 * When writes happen to the on-disk log, we don't subtract the length of the
456 * log record header from any reservation. By wasting space in each
457 * reservation, we prevent over allocation problems.
458 */
459int
460xfs_log_reserve(
461 struct xfs_mount *mp,
462 int unit_bytes,
463 int cnt,
464 struct xlog_ticket **ticp,
465 bool permanent)
466{
467 struct xlog *log = mp->m_log;
468 struct xlog_ticket *tic;
469 int need_bytes;
470 int error = 0;
471
472 if (xlog_is_shutdown(log))
473 return -EIO;
474
475 XFS_STATS_INC(mp, xs_try_logspace);
476
477 ASSERT(*ticp == NULL);
478 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
479 *ticp = tic;
480
481 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
482 : tic->t_unit_res);
483
484 trace_xfs_log_reserve(log, tic);
485
486 error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
487 &need_bytes);
488 if (error)
489 goto out_error;
490
491 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
492 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
493 trace_xfs_log_reserve_exit(log, tic);
494 xlog_verify_grant_tail(log);
495 return 0;
496
497out_error:
498 /*
499 * If we are failing, make sure the ticket doesn't have any current
500 * reservations. We don't want to add this back when the ticket/
501 * transaction gets cancelled.
502 */
503 tic->t_curr_res = 0;
504 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
505 return error;
506}
507
508/*
509 * Run all the pending iclog callbacks and wake log force waiters and iclog
510 * space waiters so they can process the newly set shutdown state. We really
511 * don't care what order we process callbacks here because the log is shut down
512 * and so state cannot change on disk anymore. However, we cannot wake waiters
513 * until the callbacks have been processed because we may be in unmount and
514 * we must ensure that all AIL operations the callbacks perform have completed
515 * before we tear down the AIL.
516 *
517 * We avoid processing actively referenced iclogs so that we don't run callbacks
518 * while the iclog owner might still be preparing the iclog for IO submssion.
519 * These will be caught by xlog_state_iclog_release() and call this function
520 * again to process any callbacks that may have been added to that iclog.
521 */
522static void
523xlog_state_shutdown_callbacks(
524 struct xlog *log)
525{
526 struct xlog_in_core *iclog;
527 LIST_HEAD(cb_list);
528
529 iclog = log->l_iclog;
530 do {
531 if (atomic_read(&iclog->ic_refcnt)) {
532 /* Reference holder will re-run iclog callbacks. */
533 continue;
534 }
535 list_splice_init(&iclog->ic_callbacks, &cb_list);
536 spin_unlock(&log->l_icloglock);
537
538 xlog_cil_process_committed(&cb_list);
539
540 spin_lock(&log->l_icloglock);
541 wake_up_all(&iclog->ic_write_wait);
542 wake_up_all(&iclog->ic_force_wait);
543 } while ((iclog = iclog->ic_next) != log->l_iclog);
544
545 wake_up_all(&log->l_flush_wait);
546}
547
548/*
549 * Flush iclog to disk if this is the last reference to the given iclog and the
550 * it is in the WANT_SYNC state.
551 *
552 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
554 * written to stable storage, and implies that a commit record is contained
555 * within the iclog. We need to ensure that the log tail does not move beyond
556 * the tail that the first commit record in the iclog ordered against, otherwise
557 * correct recovery of that checkpoint becomes dependent on future operations
558 * performed on this iclog.
559 *
560 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 * current tail into iclog. Once the iclog tail is set, future operations must
562 * not modify it, otherwise they potentially violate ordering constraints for
563 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
564 * the iclog will get zeroed on activation of the iclog after sync, so we
565 * always capture the tail lsn on the iclog on the first NEED_FUA release
566 * regardless of the number of active reference counts on this iclog.
567 */
568int
569xlog_state_release_iclog(
570 struct xlog *log,
571 struct xlog_in_core *iclog,
572 struct xlog_ticket *ticket)
573{
574 xfs_lsn_t tail_lsn;
575 bool last_ref;
576
577 lockdep_assert_held(&log->l_icloglock);
578
579 trace_xlog_iclog_release(iclog, _RET_IP_);
580 /*
581 * Grabbing the current log tail needs to be atomic w.r.t. the writing
582 * of the tail LSN into the iclog so we guarantee that the log tail does
583 * not move between the first time we know that the iclog needs to be
584 * made stable and when we eventually submit it.
585 */
586 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 !iclog->ic_header.h_tail_lsn) {
589 tail_lsn = xlog_assign_tail_lsn(log->l_mp);
590 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
591 }
592
593 last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
594
595 if (xlog_is_shutdown(log)) {
596 /*
597 * If there are no more references to this iclog, process the
598 * pending iclog callbacks that were waiting on the release of
599 * this iclog.
600 */
601 if (last_ref)
602 xlog_state_shutdown_callbacks(log);
603 return -EIO;
604 }
605
606 if (!last_ref)
607 return 0;
608
609 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
611 return 0;
612 }
613
614 iclog->ic_state = XLOG_STATE_SYNCING;
615 xlog_verify_tail_lsn(log, iclog);
616 trace_xlog_iclog_syncing(iclog, _RET_IP_);
617
618 spin_unlock(&log->l_icloglock);
619 xlog_sync(log, iclog, ticket);
620 spin_lock(&log->l_icloglock);
621 return 0;
622}
623
624/*
625 * Mount a log filesystem
626 *
627 * mp - ubiquitous xfs mount point structure
628 * log_target - buftarg of on-disk log device
629 * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
630 * num_bblocks - Number of BBSIZE blocks in on-disk log
631 *
632 * Return error or zero.
633 */
634int
635xfs_log_mount(
636 xfs_mount_t *mp,
637 xfs_buftarg_t *log_target,
638 xfs_daddr_t blk_offset,
639 int num_bblks)
640{
641 struct xlog *log;
642 bool fatal = xfs_has_crc(mp);
643 int error = 0;
644 int min_logfsbs;
645
646 if (!xfs_has_norecovery(mp)) {
647 xfs_notice(mp, "Mounting V%d Filesystem %pU",
648 XFS_SB_VERSION_NUM(&mp->m_sb),
649 &mp->m_sb.sb_uuid);
650 } else {
651 xfs_notice(mp,
652"Mounting V%d filesystem %pU in no-recovery mode. Filesystem will be inconsistent.",
653 XFS_SB_VERSION_NUM(&mp->m_sb),
654 &mp->m_sb.sb_uuid);
655 ASSERT(xfs_is_readonly(mp));
656 }
657
658 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
659 if (IS_ERR(log)) {
660 error = PTR_ERR(log);
661 goto out;
662 }
663 mp->m_log = log;
664
665 /*
666 * Validate the given log space and drop a critical message via syslog
667 * if the log size is too small that would lead to some unexpected
668 * situations in transaction log space reservation stage.
669 *
670 * Note: we can't just reject the mount if the validation fails. This
671 * would mean that people would have to downgrade their kernel just to
672 * remedy the situation as there is no way to grow the log (short of
673 * black magic surgery with xfs_db).
674 *
675 * We can, however, reject mounts for CRC format filesystems, as the
676 * mkfs binary being used to make the filesystem should never create a
677 * filesystem with a log that is too small.
678 */
679 min_logfsbs = xfs_log_calc_minimum_size(mp);
680
681 if (mp->m_sb.sb_logblocks < min_logfsbs) {
682 xfs_warn(mp,
683 "Log size %d blocks too small, minimum size is %d blocks",
684 mp->m_sb.sb_logblocks, min_logfsbs);
685 error = -EINVAL;
686 } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
687 xfs_warn(mp,
688 "Log size %d blocks too large, maximum size is %lld blocks",
689 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
690 error = -EINVAL;
691 } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
692 xfs_warn(mp,
693 "log size %lld bytes too large, maximum size is %lld bytes",
694 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
695 XFS_MAX_LOG_BYTES);
696 error = -EINVAL;
697 } else if (mp->m_sb.sb_logsunit > 1 &&
698 mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
699 xfs_warn(mp,
700 "log stripe unit %u bytes must be a multiple of block size",
701 mp->m_sb.sb_logsunit);
702 error = -EINVAL;
703 fatal = true;
704 }
705 if (error) {
706 /*
707 * Log check errors are always fatal on v5; or whenever bad
708 * metadata leads to a crash.
709 */
710 if (fatal) {
711 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
712 ASSERT(0);
713 goto out_free_log;
714 }
715 xfs_crit(mp, "Log size out of supported range.");
716 xfs_crit(mp,
717"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
718 }
719
720 /*
721 * Initialize the AIL now we have a log.
722 */
723 error = xfs_trans_ail_init(mp);
724 if (error) {
725 xfs_warn(mp, "AIL initialisation failed: error %d", error);
726 goto out_free_log;
727 }
728 log->l_ailp = mp->m_ail;
729
730 /*
731 * skip log recovery on a norecovery mount. pretend it all
732 * just worked.
733 */
734 if (!xfs_has_norecovery(mp)) {
735 /*
736 * log recovery ignores readonly state and so we need to clear
737 * mount-based read only state so it can write to disk.
738 */
739 bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
740 &mp->m_opstate);
741 error = xlog_recover(log);
742 if (readonly)
743 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
744 if (error) {
745 xfs_warn(mp, "log mount/recovery failed: error %d",
746 error);
747 xlog_recover_cancel(log);
748 goto out_destroy_ail;
749 }
750 }
751
752 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
753 "log");
754 if (error)
755 goto out_destroy_ail;
756
757 /* Normal transactions can now occur */
758 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
759
760 /*
761 * Now the log has been fully initialised and we know were our
762 * space grant counters are, we can initialise the permanent ticket
763 * needed for delayed logging to work.
764 */
765 xlog_cil_init_post_recovery(log);
766
767 return 0;
768
769out_destroy_ail:
770 xfs_trans_ail_destroy(mp);
771out_free_log:
772 xlog_dealloc_log(log);
773out:
774 return error;
775}
776
777/*
778 * Finish the recovery of the file system. This is separate from the
779 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
780 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
781 * here.
782 *
783 * If we finish recovery successfully, start the background log work. If we are
784 * not doing recovery, then we have a RO filesystem and we don't need to start
785 * it.
786 */
787int
788xfs_log_mount_finish(
789 struct xfs_mount *mp)
790{
791 struct xlog *log = mp->m_log;
792 bool readonly;
793 int error = 0;
794
795 if (xfs_has_norecovery(mp)) {
796 ASSERT(xfs_is_readonly(mp));
797 return 0;
798 }
799
800 /*
801 * log recovery ignores readonly state and so we need to clear
802 * mount-based read only state so it can write to disk.
803 */
804 readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
805
806 /*
807 * During the second phase of log recovery, we need iget and
808 * iput to behave like they do for an active filesystem.
809 * xfs_fs_drop_inode needs to be able to prevent the deletion
810 * of inodes before we're done replaying log items on those
811 * inodes. Turn it off immediately after recovery finishes
812 * so that we don't leak the quota inodes if subsequent mount
813 * activities fail.
814 *
815 * We let all inodes involved in redo item processing end up on
816 * the LRU instead of being evicted immediately so that if we do
817 * something to an unlinked inode, the irele won't cause
818 * premature truncation and freeing of the inode, which results
819 * in log recovery failure. We have to evict the unreferenced
820 * lru inodes after clearing SB_ACTIVE because we don't
821 * otherwise clean up the lru if there's a subsequent failure in
822 * xfs_mountfs, which leads to us leaking the inodes if nothing
823 * else (e.g. quotacheck) references the inodes before the
824 * mount failure occurs.
825 */
826 mp->m_super->s_flags |= SB_ACTIVE;
827 xfs_log_work_queue(mp);
828 if (xlog_recovery_needed(log))
829 error = xlog_recover_finish(log);
830 mp->m_super->s_flags &= ~SB_ACTIVE;
831 evict_inodes(mp->m_super);
832
833 /*
834 * Drain the buffer LRU after log recovery. This is required for v4
835 * filesystems to avoid leaving around buffers with NULL verifier ops,
836 * but we do it unconditionally to make sure we're always in a clean
837 * cache state after mount.
838 *
839 * Don't push in the error case because the AIL may have pending intents
840 * that aren't removed until recovery is cancelled.
841 */
842 if (xlog_recovery_needed(log)) {
843 if (!error) {
844 xfs_log_force(mp, XFS_LOG_SYNC);
845 xfs_ail_push_all_sync(mp->m_ail);
846 }
847 xfs_notice(mp, "Ending recovery (logdev: %s)",
848 mp->m_logname ? mp->m_logname : "internal");
849 } else {
850 xfs_info(mp, "Ending clean mount");
851 }
852 xfs_buftarg_drain(mp->m_ddev_targp);
853
854 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
855 if (readonly)
856 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
857
858 /* Make sure the log is dead if we're returning failure. */
859 ASSERT(!error || xlog_is_shutdown(log));
860
861 return error;
862}
863
864/*
865 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
866 * the log.
867 */
868void
869xfs_log_mount_cancel(
870 struct xfs_mount *mp)
871{
872 xlog_recover_cancel(mp->m_log);
873 xfs_log_unmount(mp);
874}
875
876/*
877 * Flush out the iclog to disk ensuring that device caches are flushed and
878 * the iclog hits stable storage before any completion waiters are woken.
879 */
880static inline int
881xlog_force_iclog(
882 struct xlog_in_core *iclog)
883{
884 atomic_inc(&iclog->ic_refcnt);
885 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
886 if (iclog->ic_state == XLOG_STATE_ACTIVE)
887 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
888 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
889}
890
891/*
892 * Cycle all the iclogbuf locks to make sure all log IO completion
893 * is done before we tear down these buffers.
894 */
895static void
896xlog_wait_iclog_completion(struct xlog *log)
897{
898 int i;
899 struct xlog_in_core *iclog = log->l_iclog;
900
901 for (i = 0; i < log->l_iclog_bufs; i++) {
902 down(&iclog->ic_sema);
903 up(&iclog->ic_sema);
904 iclog = iclog->ic_next;
905 }
906}
907
908/*
909 * Wait for the iclog and all prior iclogs to be written disk as required by the
910 * log force state machine. Waiting on ic_force_wait ensures iclog completions
911 * have been ordered and callbacks run before we are woken here, hence
912 * guaranteeing that all the iclogs up to this one are on stable storage.
913 */
914int
915xlog_wait_on_iclog(
916 struct xlog_in_core *iclog)
917 __releases(iclog->ic_log->l_icloglock)
918{
919 struct xlog *log = iclog->ic_log;
920
921 trace_xlog_iclog_wait_on(iclog, _RET_IP_);
922 if (!xlog_is_shutdown(log) &&
923 iclog->ic_state != XLOG_STATE_ACTIVE &&
924 iclog->ic_state != XLOG_STATE_DIRTY) {
925 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
926 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
927 } else {
928 spin_unlock(&log->l_icloglock);
929 }
930
931 if (xlog_is_shutdown(log))
932 return -EIO;
933 return 0;
934}
935
936/*
937 * Write out an unmount record using the ticket provided. We have to account for
938 * the data space used in the unmount ticket as this write is not done from a
939 * transaction context that has already done the accounting for us.
940 */
941static int
942xlog_write_unmount_record(
943 struct xlog *log,
944 struct xlog_ticket *ticket)
945{
946 struct {
947 struct xlog_op_header ophdr;
948 struct xfs_unmount_log_format ulf;
949 } unmount_rec = {
950 .ophdr = {
951 .oh_clientid = XFS_LOG,
952 .oh_tid = cpu_to_be32(ticket->t_tid),
953 .oh_flags = XLOG_UNMOUNT_TRANS,
954 },
955 .ulf = {
956 .magic = XLOG_UNMOUNT_TYPE,
957 },
958 };
959 struct xfs_log_iovec reg = {
960 .i_addr = &unmount_rec,
961 .i_len = sizeof(unmount_rec),
962 .i_type = XLOG_REG_TYPE_UNMOUNT,
963 };
964 struct xfs_log_vec vec = {
965 .lv_niovecs = 1,
966 .lv_iovecp = ®,
967 };
968 LIST_HEAD(lv_chain);
969 list_add(&vec.lv_list, &lv_chain);
970
971 BUILD_BUG_ON((sizeof(struct xlog_op_header) +
972 sizeof(struct xfs_unmount_log_format)) !=
973 sizeof(unmount_rec));
974
975 /* account for space used by record data */
976 ticket->t_curr_res -= sizeof(unmount_rec);
977
978 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
979}
980
981/*
982 * Mark the filesystem clean by writing an unmount record to the head of the
983 * log.
984 */
985static void
986xlog_unmount_write(
987 struct xlog *log)
988{
989 struct xfs_mount *mp = log->l_mp;
990 struct xlog_in_core *iclog;
991 struct xlog_ticket *tic = NULL;
992 int error;
993
994 error = xfs_log_reserve(mp, 600, 1, &tic, 0);
995 if (error)
996 goto out_err;
997
998 error = xlog_write_unmount_record(log, tic);
999 /*
1000 * At this point, we're umounting anyway, so there's no point in
1001 * transitioning log state to shutdown. Just continue...
1002 */
1003out_err:
1004 if (error)
1005 xfs_alert(mp, "%s: unmount record failed", __func__);
1006
1007 spin_lock(&log->l_icloglock);
1008 iclog = log->l_iclog;
1009 error = xlog_force_iclog(iclog);
1010 xlog_wait_on_iclog(iclog);
1011
1012 if (tic) {
1013 trace_xfs_log_umount_write(log, tic);
1014 xfs_log_ticket_ungrant(log, tic);
1015 }
1016}
1017
1018static void
1019xfs_log_unmount_verify_iclog(
1020 struct xlog *log)
1021{
1022 struct xlog_in_core *iclog = log->l_iclog;
1023
1024 do {
1025 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
1026 ASSERT(iclog->ic_offset == 0);
1027 } while ((iclog = iclog->ic_next) != log->l_iclog);
1028}
1029
1030/*
1031 * Unmount record used to have a string "Unmount filesystem--" in the
1032 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
1033 * We just write the magic number now since that particular field isn't
1034 * currently architecture converted and "Unmount" is a bit foo.
1035 * As far as I know, there weren't any dependencies on the old behaviour.
1036 */
1037static void
1038xfs_log_unmount_write(
1039 struct xfs_mount *mp)
1040{
1041 struct xlog *log = mp->m_log;
1042
1043 if (!xfs_log_writable(mp))
1044 return;
1045
1046 xfs_log_force(mp, XFS_LOG_SYNC);
1047
1048 if (xlog_is_shutdown(log))
1049 return;
1050
1051 /*
1052 * If we think the summary counters are bad, avoid writing the unmount
1053 * record to force log recovery at next mount, after which the summary
1054 * counters will be recalculated. Refer to xlog_check_unmount_rec for
1055 * more details.
1056 */
1057 if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
1058 XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
1059 xfs_alert(mp, "%s: will fix summary counters at next mount",
1060 __func__);
1061 return;
1062 }
1063
1064 xfs_log_unmount_verify_iclog(log);
1065 xlog_unmount_write(log);
1066}
1067
1068/*
1069 * Empty the log for unmount/freeze.
1070 *
1071 * To do this, we first need to shut down the background log work so it is not
1072 * trying to cover the log as we clean up. We then need to unpin all objects in
1073 * the log so we can then flush them out. Once they have completed their IO and
1074 * run the callbacks removing themselves from the AIL, we can cover the log.
1075 */
1076int
1077xfs_log_quiesce(
1078 struct xfs_mount *mp)
1079{
1080 /*
1081 * Clear log incompat features since we're quiescing the log. Report
1082 * failures, though it's not fatal to have a higher log feature
1083 * protection level than the log contents actually require.
1084 */
1085 if (xfs_clear_incompat_log_features(mp)) {
1086 int error;
1087
1088 error = xfs_sync_sb(mp, false);
1089 if (error)
1090 xfs_warn(mp,
1091 "Failed to clear log incompat features on quiesce");
1092 }
1093
1094 cancel_delayed_work_sync(&mp->m_log->l_work);
1095 xfs_log_force(mp, XFS_LOG_SYNC);
1096
1097 /*
1098 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
1099 * will push it, xfs_buftarg_wait() will not wait for it. Further,
1100 * xfs_buf_iowait() cannot be used because it was pushed with the
1101 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
1102 * the IO to complete.
1103 */
1104 xfs_ail_push_all_sync(mp->m_ail);
1105 xfs_buftarg_wait(mp->m_ddev_targp);
1106 xfs_buf_lock(mp->m_sb_bp);
1107 xfs_buf_unlock(mp->m_sb_bp);
1108
1109 return xfs_log_cover(mp);
1110}
1111
1112void
1113xfs_log_clean(
1114 struct xfs_mount *mp)
1115{
1116 xfs_log_quiesce(mp);
1117 xfs_log_unmount_write(mp);
1118}
1119
1120/*
1121 * Shut down and release the AIL and Log.
1122 *
1123 * During unmount, we need to ensure we flush all the dirty metadata objects
1124 * from the AIL so that the log is empty before we write the unmount record to
1125 * the log. Once this is done, we can tear down the AIL and the log.
1126 */
1127void
1128xfs_log_unmount(
1129 struct xfs_mount *mp)
1130{
1131 xfs_log_clean(mp);
1132
1133 /*
1134 * If shutdown has come from iclog IO context, the log
1135 * cleaning will have been skipped and so we need to wait
1136 * for the iclog to complete shutdown processing before we
1137 * tear anything down.
1138 */
1139 xlog_wait_iclog_completion(mp->m_log);
1140
1141 xfs_buftarg_drain(mp->m_ddev_targp);
1142
1143 xfs_trans_ail_destroy(mp);
1144
1145 xfs_sysfs_del(&mp->m_log->l_kobj);
1146
1147 xlog_dealloc_log(mp->m_log);
1148}
1149
1150void
1151xfs_log_item_init(
1152 struct xfs_mount *mp,
1153 struct xfs_log_item *item,
1154 int type,
1155 const struct xfs_item_ops *ops)
1156{
1157 item->li_log = mp->m_log;
1158 item->li_ailp = mp->m_ail;
1159 item->li_type = type;
1160 item->li_ops = ops;
1161 item->li_lv = NULL;
1162
1163 INIT_LIST_HEAD(&item->li_ail);
1164 INIT_LIST_HEAD(&item->li_cil);
1165 INIT_LIST_HEAD(&item->li_bio_list);
1166 INIT_LIST_HEAD(&item->li_trans);
1167}
1168
1169/*
1170 * Wake up processes waiting for log space after we have moved the log tail.
1171 */
1172void
1173xfs_log_space_wake(
1174 struct xfs_mount *mp)
1175{
1176 struct xlog *log = mp->m_log;
1177 int free_bytes;
1178
1179 if (xlog_is_shutdown(log))
1180 return;
1181
1182 if (!list_empty_careful(&log->l_write_head.waiters)) {
1183 ASSERT(!xlog_in_recovery(log));
1184
1185 spin_lock(&log->l_write_head.lock);
1186 free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1187 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1188 spin_unlock(&log->l_write_head.lock);
1189 }
1190
1191 if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1192 ASSERT(!xlog_in_recovery(log));
1193
1194 spin_lock(&log->l_reserve_head.lock);
1195 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1196 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1197 spin_unlock(&log->l_reserve_head.lock);
1198 }
1199}
1200
1201/*
1202 * Determine if we have a transaction that has gone to disk that needs to be
1203 * covered. To begin the transition to the idle state firstly the log needs to
1204 * be idle. That means the CIL, the AIL and the iclogs needs to be empty before
1205 * we start attempting to cover the log.
1206 *
1207 * Only if we are then in a state where covering is needed, the caller is
1208 * informed that dummy transactions are required to move the log into the idle
1209 * state.
1210 *
1211 * If there are any items in the AIl or CIL, then we do not want to attempt to
1212 * cover the log as we may be in a situation where there isn't log space
1213 * available to run a dummy transaction and this can lead to deadlocks when the
1214 * tail of the log is pinned by an item that is modified in the CIL. Hence
1215 * there's no point in running a dummy transaction at this point because we
1216 * can't start trying to idle the log until both the CIL and AIL are empty.
1217 */
1218static bool
1219xfs_log_need_covered(
1220 struct xfs_mount *mp)
1221{
1222 struct xlog *log = mp->m_log;
1223 bool needed = false;
1224
1225 if (!xlog_cil_empty(log))
1226 return false;
1227
1228 spin_lock(&log->l_icloglock);
1229 switch (log->l_covered_state) {
1230 case XLOG_STATE_COVER_DONE:
1231 case XLOG_STATE_COVER_DONE2:
1232 case XLOG_STATE_COVER_IDLE:
1233 break;
1234 case XLOG_STATE_COVER_NEED:
1235 case XLOG_STATE_COVER_NEED2:
1236 if (xfs_ail_min_lsn(log->l_ailp))
1237 break;
1238 if (!xlog_iclogs_empty(log))
1239 break;
1240
1241 needed = true;
1242 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1243 log->l_covered_state = XLOG_STATE_COVER_DONE;
1244 else
1245 log->l_covered_state = XLOG_STATE_COVER_DONE2;
1246 break;
1247 default:
1248 needed = true;
1249 break;
1250 }
1251 spin_unlock(&log->l_icloglock);
1252 return needed;
1253}
1254
1255/*
1256 * Explicitly cover the log. This is similar to background log covering but
1257 * intended for usage in quiesce codepaths. The caller is responsible to ensure
1258 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1259 * must all be empty.
1260 */
1261static int
1262xfs_log_cover(
1263 struct xfs_mount *mp)
1264{
1265 int error = 0;
1266 bool need_covered;
1267
1268 ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
1269 !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
1270 xlog_is_shutdown(mp->m_log));
1271
1272 if (!xfs_log_writable(mp))
1273 return 0;
1274
1275 /*
1276 * xfs_log_need_covered() is not idempotent because it progresses the
1277 * state machine if the log requires covering. Therefore, we must call
1278 * this function once and use the result until we've issued an sb sync.
1279 * Do so first to make that abundantly clear.
1280 *
1281 * Fall into the covering sequence if the log needs covering or the
1282 * mount has lazy superblock accounting to sync to disk. The sb sync
1283 * used for covering accumulates the in-core counters, so covering
1284 * handles this for us.
1285 */
1286 need_covered = xfs_log_need_covered(mp);
1287 if (!need_covered && !xfs_has_lazysbcount(mp))
1288 return 0;
1289
1290 /*
1291 * To cover the log, commit the superblock twice (at most) in
1292 * independent checkpoints. The first serves as a reference for the
1293 * tail pointer. The sync transaction and AIL push empties the AIL and
1294 * updates the in-core tail to the LSN of the first checkpoint. The
1295 * second commit updates the on-disk tail with the in-core LSN,
1296 * covering the log. Push the AIL one more time to leave it empty, as
1297 * we found it.
1298 */
1299 do {
1300 error = xfs_sync_sb(mp, true);
1301 if (error)
1302 break;
1303 xfs_ail_push_all_sync(mp->m_ail);
1304 } while (xfs_log_need_covered(mp));
1305
1306 return error;
1307}
1308
1309/*
1310 * We may be holding the log iclog lock upon entering this routine.
1311 */
1312xfs_lsn_t
1313xlog_assign_tail_lsn_locked(
1314 struct xfs_mount *mp)
1315{
1316 struct xlog *log = mp->m_log;
1317 struct xfs_log_item *lip;
1318 xfs_lsn_t tail_lsn;
1319
1320 assert_spin_locked(&mp->m_ail->ail_lock);
1321
1322 /*
1323 * To make sure we always have a valid LSN for the log tail we keep
1324 * track of the last LSN which was committed in log->l_last_sync_lsn,
1325 * and use that when the AIL was empty.
1326 */
1327 lip = xfs_ail_min(mp->m_ail);
1328 if (lip)
1329 tail_lsn = lip->li_lsn;
1330 else
1331 tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1332 trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1333 atomic64_set(&log->l_tail_lsn, tail_lsn);
1334 return tail_lsn;
1335}
1336
1337xfs_lsn_t
1338xlog_assign_tail_lsn(
1339 struct xfs_mount *mp)
1340{
1341 xfs_lsn_t tail_lsn;
1342
1343 spin_lock(&mp->m_ail->ail_lock);
1344 tail_lsn = xlog_assign_tail_lsn_locked(mp);
1345 spin_unlock(&mp->m_ail->ail_lock);
1346
1347 return tail_lsn;
1348}
1349
1350/*
1351 * Return the space in the log between the tail and the head. The head
1352 * is passed in the cycle/bytes formal parms. In the special case where
1353 * the reserve head has wrapped passed the tail, this calculation is no
1354 * longer valid. In this case, just return 0 which means there is no space
1355 * in the log. This works for all places where this function is called
1356 * with the reserve head. Of course, if the write head were to ever
1357 * wrap the tail, we should blow up. Rather than catch this case here,
1358 * we depend on other ASSERTions in other parts of the code. XXXmiken
1359 *
1360 * If reservation head is behind the tail, we have a problem. Warn about it,
1361 * but then treat it as if the log is empty.
1362 *
1363 * If the log is shut down, the head and tail may be invalid or out of whack, so
1364 * shortcut invalidity asserts in this case so that we don't trigger them
1365 * falsely.
1366 */
1367STATIC int
1368xlog_space_left(
1369 struct xlog *log,
1370 atomic64_t *head)
1371{
1372 int tail_bytes;
1373 int tail_cycle;
1374 int head_cycle;
1375 int head_bytes;
1376
1377 xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1378 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1379 tail_bytes = BBTOB(tail_bytes);
1380 if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1381 return log->l_logsize - (head_bytes - tail_bytes);
1382 if (tail_cycle + 1 < head_cycle)
1383 return 0;
1384
1385 /* Ignore potential inconsistency when shutdown. */
1386 if (xlog_is_shutdown(log))
1387 return log->l_logsize;
1388
1389 if (tail_cycle < head_cycle) {
1390 ASSERT(tail_cycle == (head_cycle - 1));
1391 return tail_bytes - head_bytes;
1392 }
1393
1394 /*
1395 * The reservation head is behind the tail. In this case we just want to
1396 * return the size of the log as the amount of space left.
1397 */
1398 xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1399 xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
1400 tail_cycle, tail_bytes);
1401 xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
1402 head_cycle, head_bytes);
1403 ASSERT(0);
1404 return log->l_logsize;
1405}
1406
1407
1408static void
1409xlog_ioend_work(
1410 struct work_struct *work)
1411{
1412 struct xlog_in_core *iclog =
1413 container_of(work, struct xlog_in_core, ic_end_io_work);
1414 struct xlog *log = iclog->ic_log;
1415 int error;
1416
1417 error = blk_status_to_errno(iclog->ic_bio.bi_status);
1418#ifdef DEBUG
1419 /* treat writes with injected CRC errors as failed */
1420 if (iclog->ic_fail_crc)
1421 error = -EIO;
1422#endif
1423
1424 /*
1425 * Race to shutdown the filesystem if we see an error.
1426 */
1427 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1428 xfs_alert(log->l_mp, "log I/O error %d", error);
1429 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1430 }
1431
1432 xlog_state_done_syncing(iclog);
1433 bio_uninit(&iclog->ic_bio);
1434
1435 /*
1436 * Drop the lock to signal that we are done. Nothing references the
1437 * iclog after this, so an unmount waiting on this lock can now tear it
1438 * down safely. As such, it is unsafe to reference the iclog after the
1439 * unlock as we could race with it being freed.
1440 */
1441 up(&iclog->ic_sema);
1442}
1443
1444/*
1445 * Return size of each in-core log record buffer.
1446 *
1447 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1448 *
1449 * If the filesystem blocksize is too large, we may need to choose a
1450 * larger size since the directory code currently logs entire blocks.
1451 */
1452STATIC void
1453xlog_get_iclog_buffer_size(
1454 struct xfs_mount *mp,
1455 struct xlog *log)
1456{
1457 if (mp->m_logbufs <= 0)
1458 mp->m_logbufs = XLOG_MAX_ICLOGS;
1459 if (mp->m_logbsize <= 0)
1460 mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1461
1462 log->l_iclog_bufs = mp->m_logbufs;
1463 log->l_iclog_size = mp->m_logbsize;
1464
1465 /*
1466 * # headers = size / 32k - one header holds cycles from 32k of data.
1467 */
1468 log->l_iclog_heads =
1469 DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1470 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1471}
1472
1473void
1474xfs_log_work_queue(
1475 struct xfs_mount *mp)
1476{
1477 queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1478 msecs_to_jiffies(xfs_syncd_centisecs * 10));
1479}
1480
1481/*
1482 * Clear the log incompat flags if we have the opportunity.
1483 *
1484 * This only happens if we're about to log the second dummy transaction as part
1485 * of covering the log and we can get the log incompat feature usage lock.
1486 */
1487static inline void
1488xlog_clear_incompat(
1489 struct xlog *log)
1490{
1491 struct xfs_mount *mp = log->l_mp;
1492
1493 if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
1494 XFS_SB_FEAT_INCOMPAT_LOG_ALL))
1495 return;
1496
1497 if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1498 return;
1499
1500 if (!down_write_trylock(&log->l_incompat_users))
1501 return;
1502
1503 xfs_clear_incompat_log_features(mp);
1504 up_write(&log->l_incompat_users);
1505}
1506
1507/*
1508 * Every sync period we need to unpin all items in the AIL and push them to
1509 * disk. If there is nothing dirty, then we might need to cover the log to
1510 * indicate that the filesystem is idle.
1511 */
1512static void
1513xfs_log_worker(
1514 struct work_struct *work)
1515{
1516 struct xlog *log = container_of(to_delayed_work(work),
1517 struct xlog, l_work);
1518 struct xfs_mount *mp = log->l_mp;
1519
1520 /* dgc: errors ignored - not fatal and nowhere to report them */
1521 if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
1522 /*
1523 * Dump a transaction into the log that contains no real change.
1524 * This is needed to stamp the current tail LSN into the log
1525 * during the covering operation.
1526 *
1527 * We cannot use an inode here for this - that will push dirty
1528 * state back up into the VFS and then periodic inode flushing
1529 * will prevent log covering from making progress. Hence we
1530 * synchronously log the superblock instead to ensure the
1531 * superblock is immediately unpinned and can be written back.
1532 */
1533 xlog_clear_incompat(log);
1534 xfs_sync_sb(mp, true);
1535 } else
1536 xfs_log_force(mp, 0);
1537
1538 /* start pushing all the metadata that is currently dirty */
1539 xfs_ail_push_all(mp->m_ail);
1540
1541 /* queue us up again */
1542 xfs_log_work_queue(mp);
1543}
1544
1545/*
1546 * This routine initializes some of the log structure for a given mount point.
1547 * Its primary purpose is to fill in enough, so recovery can occur. However,
1548 * some other stuff may be filled in too.
1549 */
1550STATIC struct xlog *
1551xlog_alloc_log(
1552 struct xfs_mount *mp,
1553 struct xfs_buftarg *log_target,
1554 xfs_daddr_t blk_offset,
1555 int num_bblks)
1556{
1557 struct xlog *log;
1558 xlog_rec_header_t *head;
1559 xlog_in_core_t **iclogp;
1560 xlog_in_core_t *iclog, *prev_iclog=NULL;
1561 int i;
1562 int error = -ENOMEM;
1563 uint log2_size = 0;
1564
1565 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1566 if (!log) {
1567 xfs_warn(mp, "Log allocation failed: No memory!");
1568 goto out;
1569 }
1570
1571 log->l_mp = mp;
1572 log->l_targ = log_target;
1573 log->l_logsize = BBTOB(num_bblks);
1574 log->l_logBBstart = blk_offset;
1575 log->l_logBBsize = num_bblks;
1576 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1577 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1578 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1579
1580 log->l_prev_block = -1;
1581 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1582 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1583 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1584 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1585
1586 if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
1587 log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1588 else
1589 log->l_iclog_roundoff = BBSIZE;
1590
1591 xlog_grant_head_init(&log->l_reserve_head);
1592 xlog_grant_head_init(&log->l_write_head);
1593
1594 error = -EFSCORRUPTED;
1595 if (xfs_has_sector(mp)) {
1596 log2_size = mp->m_sb.sb_logsectlog;
1597 if (log2_size < BBSHIFT) {
1598 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1599 log2_size, BBSHIFT);
1600 goto out_free_log;
1601 }
1602
1603 log2_size -= BBSHIFT;
1604 if (log2_size > mp->m_sectbb_log) {
1605 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1606 log2_size, mp->m_sectbb_log);
1607 goto out_free_log;
1608 }
1609
1610 /* for larger sector sizes, must have v2 or external log */
1611 if (log2_size && log->l_logBBstart > 0 &&
1612 !xfs_has_logv2(mp)) {
1613 xfs_warn(mp,
1614 "log sector size (0x%x) invalid for configuration.",
1615 log2_size);
1616 goto out_free_log;
1617 }
1618 }
1619 log->l_sectBBsize = 1 << log2_size;
1620
1621 init_rwsem(&log->l_incompat_users);
1622
1623 xlog_get_iclog_buffer_size(mp, log);
1624
1625 spin_lock_init(&log->l_icloglock);
1626 init_waitqueue_head(&log->l_flush_wait);
1627
1628 iclogp = &log->l_iclog;
1629 /*
1630 * The amount of memory to allocate for the iclog structure is
1631 * rather funky due to the way the structure is defined. It is
1632 * done this way so that we can use different sizes for machines
1633 * with different amounts of memory. See the definition of
1634 * xlog_in_core_t in xfs_log_priv.h for details.
1635 */
1636 ASSERT(log->l_iclog_size >= 4096);
1637 for (i = 0; i < log->l_iclog_bufs; i++) {
1638 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1639 sizeof(struct bio_vec);
1640
1641 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1642 if (!iclog)
1643 goto out_free_iclog;
1644
1645 *iclogp = iclog;
1646 iclog->ic_prev = prev_iclog;
1647 prev_iclog = iclog;
1648
1649 iclog->ic_data = kvzalloc(log->l_iclog_size,
1650 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1651 if (!iclog->ic_data)
1652 goto out_free_iclog;
1653 head = &iclog->ic_header;
1654 memset(head, 0, sizeof(xlog_rec_header_t));
1655 head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1656 head->h_version = cpu_to_be32(
1657 xfs_has_logv2(log->l_mp) ? 2 : 1);
1658 head->h_size = cpu_to_be32(log->l_iclog_size);
1659 /* new fields */
1660 head->h_fmt = cpu_to_be32(XLOG_FMT);
1661 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1662
1663 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1664 iclog->ic_state = XLOG_STATE_ACTIVE;
1665 iclog->ic_log = log;
1666 atomic_set(&iclog->ic_refcnt, 0);
1667 INIT_LIST_HEAD(&iclog->ic_callbacks);
1668 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1669
1670 init_waitqueue_head(&iclog->ic_force_wait);
1671 init_waitqueue_head(&iclog->ic_write_wait);
1672 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1673 sema_init(&iclog->ic_sema, 1);
1674
1675 iclogp = &iclog->ic_next;
1676 }
1677 *iclogp = log->l_iclog; /* complete ring */
1678 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1679
1680 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1681 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
1682 WQ_HIGHPRI),
1683 0, mp->m_super->s_id);
1684 if (!log->l_ioend_workqueue)
1685 goto out_free_iclog;
1686
1687 error = xlog_cil_init(log);
1688 if (error)
1689 goto out_destroy_workqueue;
1690 return log;
1691
1692out_destroy_workqueue:
1693 destroy_workqueue(log->l_ioend_workqueue);
1694out_free_iclog:
1695 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1696 prev_iclog = iclog->ic_next;
1697 kmem_free(iclog->ic_data);
1698 kmem_free(iclog);
1699 if (prev_iclog == log->l_iclog)
1700 break;
1701 }
1702out_free_log:
1703 kmem_free(log);
1704out:
1705 return ERR_PTR(error);
1706} /* xlog_alloc_log */
1707
1708/*
1709 * Compute the LSN that we'd need to push the log tail towards in order to have
1710 * (a) enough on-disk log space to log the number of bytes specified, (b) at
1711 * least 25% of the log space free, and (c) at least 256 blocks free. If the
1712 * log free space already meets all three thresholds, this function returns
1713 * NULLCOMMITLSN.
1714 */
1715xfs_lsn_t
1716xlog_grant_push_threshold(
1717 struct xlog *log,
1718 int need_bytes)
1719{
1720 xfs_lsn_t threshold_lsn = 0;
1721 xfs_lsn_t last_sync_lsn;
1722 int free_blocks;
1723 int free_bytes;
1724 int threshold_block;
1725 int threshold_cycle;
1726 int free_threshold;
1727
1728 ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1729
1730 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1731 free_blocks = BTOBBT(free_bytes);
1732
1733 /*
1734 * Set the threshold for the minimum number of free blocks in the
1735 * log to the maximum of what the caller needs, one quarter of the
1736 * log, and 256 blocks.
1737 */
1738 free_threshold = BTOBB(need_bytes);
1739 free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1740 free_threshold = max(free_threshold, 256);
1741 if (free_blocks >= free_threshold)
1742 return NULLCOMMITLSN;
1743
1744 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1745 &threshold_block);
1746 threshold_block += free_threshold;
1747 if (threshold_block >= log->l_logBBsize) {
1748 threshold_block -= log->l_logBBsize;
1749 threshold_cycle += 1;
1750 }
1751 threshold_lsn = xlog_assign_lsn(threshold_cycle,
1752 threshold_block);
1753 /*
1754 * Don't pass in an lsn greater than the lsn of the last
1755 * log record known to be on disk. Use a snapshot of the last sync lsn
1756 * so that it doesn't change between the compare and the set.
1757 */
1758 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1759 if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1760 threshold_lsn = last_sync_lsn;
1761
1762 return threshold_lsn;
1763}
1764
1765/*
1766 * Push the tail of the log if we need to do so to maintain the free log space
1767 * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
1768 * policy which pushes on an lsn which is further along in the log once we
1769 * reach the high water mark. In this manner, we would be creating a low water
1770 * mark.
1771 */
1772STATIC void
1773xlog_grant_push_ail(
1774 struct xlog *log,
1775 int need_bytes)
1776{
1777 xfs_lsn_t threshold_lsn;
1778
1779 threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1780 if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1781 return;
1782
1783 /*
1784 * Get the transaction layer to kick the dirty buffers out to
1785 * disk asynchronously. No point in trying to do this if
1786 * the filesystem is shutting down.
1787 */
1788 xfs_ail_push(log->l_ailp, threshold_lsn);
1789}
1790
1791/*
1792 * Stamp cycle number in every block
1793 */
1794STATIC void
1795xlog_pack_data(
1796 struct xlog *log,
1797 struct xlog_in_core *iclog,
1798 int roundoff)
1799{
1800 int i, j, k;
1801 int size = iclog->ic_offset + roundoff;
1802 __be32 cycle_lsn;
1803 char *dp;
1804
1805 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1806
1807 dp = iclog->ic_datap;
1808 for (i = 0; i < BTOBB(size); i++) {
1809 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1810 break;
1811 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1812 *(__be32 *)dp = cycle_lsn;
1813 dp += BBSIZE;
1814 }
1815
1816 if (xfs_has_logv2(log->l_mp)) {
1817 xlog_in_core_2_t *xhdr = iclog->ic_data;
1818
1819 for ( ; i < BTOBB(size); i++) {
1820 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1821 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1822 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1823 *(__be32 *)dp = cycle_lsn;
1824 dp += BBSIZE;
1825 }
1826
1827 for (i = 1; i < log->l_iclog_heads; i++)
1828 xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1829 }
1830}
1831
1832/*
1833 * Calculate the checksum for a log buffer.
1834 *
1835 * This is a little more complicated than it should be because the various
1836 * headers and the actual data are non-contiguous.
1837 */
1838__le32
1839xlog_cksum(
1840 struct xlog *log,
1841 struct xlog_rec_header *rhead,
1842 char *dp,
1843 int size)
1844{
1845 uint32_t crc;
1846
1847 /* first generate the crc for the record header ... */
1848 crc = xfs_start_cksum_update((char *)rhead,
1849 sizeof(struct xlog_rec_header),
1850 offsetof(struct xlog_rec_header, h_crc));
1851
1852 /* ... then for additional cycle data for v2 logs ... */
1853 if (xfs_has_logv2(log->l_mp)) {
1854 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1855 int i;
1856 int xheads;
1857
1858 xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);
1859
1860 for (i = 1; i < xheads; i++) {
1861 crc = crc32c(crc, &xhdr[i].hic_xheader,
1862 sizeof(struct xlog_rec_ext_header));
1863 }
1864 }
1865
1866 /* ... and finally for the payload */
1867 crc = crc32c(crc, dp, size);
1868
1869 return xfs_end_cksum(crc);
1870}
1871
1872static void
1873xlog_bio_end_io(
1874 struct bio *bio)
1875{
1876 struct xlog_in_core *iclog = bio->bi_private;
1877
1878 queue_work(iclog->ic_log->l_ioend_workqueue,
1879 &iclog->ic_end_io_work);
1880}
1881
1882static int
1883xlog_map_iclog_data(
1884 struct bio *bio,
1885 void *data,
1886 size_t count)
1887{
1888 do {
1889 struct page *page = kmem_to_page(data);
1890 unsigned int off = offset_in_page(data);
1891 size_t len = min_t(size_t, count, PAGE_SIZE - off);
1892
1893 if (bio_add_page(bio, page, len, off) != len)
1894 return -EIO;
1895
1896 data += len;
1897 count -= len;
1898 } while (count);
1899
1900 return 0;
1901}
1902
1903STATIC void
1904xlog_write_iclog(
1905 struct xlog *log,
1906 struct xlog_in_core *iclog,
1907 uint64_t bno,
1908 unsigned int count)
1909{
1910 ASSERT(bno < log->l_logBBsize);
1911 trace_xlog_iclog_write(iclog, _RET_IP_);
1912
1913 /*
1914 * We lock the iclogbufs here so that we can serialise against I/O
1915 * completion during unmount. We might be processing a shutdown
1916 * triggered during unmount, and that can occur asynchronously to the
1917 * unmount thread, and hence we need to ensure that completes before
1918 * tearing down the iclogbufs. Hence we need to hold the buffer lock
1919 * across the log IO to archieve that.
1920 */
1921 down(&iclog->ic_sema);
1922 if (xlog_is_shutdown(log)) {
1923 /*
1924 * It would seem logical to return EIO here, but we rely on
1925 * the log state machine to propagate I/O errors instead of
1926 * doing it here. We kick of the state machine and unlock
1927 * the buffer manually, the code needs to be kept in sync
1928 * with the I/O completion path.
1929 */
1930 xlog_state_done_syncing(iclog);
1931 up(&iclog->ic_sema);
1932 return;
1933 }
1934
1935 /*
1936 * We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more
1937 * IOs coming immediately after this one. This prevents the block layer
1938 * writeback throttle from throttling log writes behind background
1939 * metadata writeback and causing priority inversions.
1940 */
1941 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1942 howmany(count, PAGE_SIZE),
1943 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
1944 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1945 iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1946 iclog->ic_bio.bi_private = iclog;
1947
1948 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1949 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1950 /*
1951 * For external log devices, we also need to flush the data
1952 * device cache first to ensure all metadata writeback covered
1953 * by the LSN in this iclog is on stable storage. This is slow,
1954 * but it *must* complete before we issue the external log IO.
1955 *
1956 * If the flush fails, we cannot conclude that past metadata
1957 * writeback from the log succeeded. Repeating the flush is
1958 * not possible, hence we must shut down with log IO error to
1959 * avoid shutdown re-entering this path and erroring out again.
1960 */
1961 if (log->l_targ != log->l_mp->m_ddev_targp &&
1962 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) {
1963 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1964 return;
1965 }
1966 }
1967 if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1968 iclog->ic_bio.bi_opf |= REQ_FUA;
1969
1970 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1971
1972 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1973 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1974 return;
1975 }
1976 if (is_vmalloc_addr(iclog->ic_data))
1977 flush_kernel_vmap_range(iclog->ic_data, count);
1978
1979 /*
1980 * If this log buffer would straddle the end of the log we will have
1981 * to split it up into two bios, so that we can continue at the start.
1982 */
1983 if (bno + BTOBB(count) > log->l_logBBsize) {
1984 struct bio *split;
1985
1986 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1987 GFP_NOIO, &fs_bio_set);
1988 bio_chain(split, &iclog->ic_bio);
1989 submit_bio(split);
1990
1991 /* restart at logical offset zero for the remainder */
1992 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1993 }
1994
1995 submit_bio(&iclog->ic_bio);
1996}
1997
1998/*
1999 * We need to bump cycle number for the part of the iclog that is
2000 * written to the start of the log. Watch out for the header magic
2001 * number case, though.
2002 */
2003static void
2004xlog_split_iclog(
2005 struct xlog *log,
2006 void *data,
2007 uint64_t bno,
2008 unsigned int count)
2009{
2010 unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
2011 unsigned int i;
2012
2013 for (i = split_offset; i < count; i += BBSIZE) {
2014 uint32_t cycle = get_unaligned_be32(data + i);
2015
2016 if (++cycle == XLOG_HEADER_MAGIC_NUM)
2017 cycle++;
2018 put_unaligned_be32(cycle, data + i);
2019 }
2020}
2021
2022static int
2023xlog_calc_iclog_size(
2024 struct xlog *log,
2025 struct xlog_in_core *iclog,
2026 uint32_t *roundoff)
2027{
2028 uint32_t count_init, count;
2029
2030 /* Add for LR header */
2031 count_init = log->l_iclog_hsize + iclog->ic_offset;
2032 count = roundup(count_init, log->l_iclog_roundoff);
2033
2034 *roundoff = count - count_init;
2035
2036 ASSERT(count >= count_init);
2037 ASSERT(*roundoff < log->l_iclog_roundoff);
2038 return count;
2039}
2040
2041/*
2042 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2043 * fashion. Previously, we should have moved the current iclog
2044 * ptr in the log to point to the next available iclog. This allows further
2045 * write to continue while this code syncs out an iclog ready to go.
2046 * Before an in-core log can be written out, the data section must be scanned
2047 * to save away the 1st word of each BBSIZE block into the header. We replace
2048 * it with the current cycle count. Each BBSIZE block is tagged with the
2049 * cycle count because there in an implicit assumption that drives will
2050 * guarantee that entire 512 byte blocks get written at once. In other words,
2051 * we can't have part of a 512 byte block written and part not written. By
2052 * tagging each block, we will know which blocks are valid when recovering
2053 * after an unclean shutdown.
2054 *
2055 * This routine is single threaded on the iclog. No other thread can be in
2056 * this routine with the same iclog. Changing contents of iclog can there-
2057 * fore be done without grabbing the state machine lock. Updating the global
2058 * log will require grabbing the lock though.
2059 *
2060 * The entire log manager uses a logical block numbering scheme. Only
2061 * xlog_write_iclog knows about the fact that the log may not start with
2062 * block zero on a given device.
2063 */
2064STATIC void
2065xlog_sync(
2066 struct xlog *log,
2067 struct xlog_in_core *iclog,
2068 struct xlog_ticket *ticket)
2069{
2070 unsigned int count; /* byte count of bwrite */
2071 unsigned int roundoff; /* roundoff to BB or stripe */
2072 uint64_t bno;
2073 unsigned int size;
2074
2075 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2076 trace_xlog_iclog_sync(iclog, _RET_IP_);
2077
2078 count = xlog_calc_iclog_size(log, iclog, &roundoff);
2079
2080 /*
2081 * If we have a ticket, account for the roundoff via the ticket
2082 * reservation to avoid touching the hot grant heads needlessly.
2083 * Otherwise, we have to move grant heads directly.
2084 */
2085 if (ticket) {
2086 ticket->t_curr_res -= roundoff;
2087 } else {
2088 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2089 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2090 }
2091
2092 /* put cycle number in every block */
2093 xlog_pack_data(log, iclog, roundoff);
2094
2095 /* real byte length */
2096 size = iclog->ic_offset;
2097 if (xfs_has_logv2(log->l_mp))
2098 size += roundoff;
2099 iclog->ic_header.h_len = cpu_to_be32(size);
2100
2101 XFS_STATS_INC(log->l_mp, xs_log_writes);
2102 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2103
2104 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2105
2106 /* Do we need to split this write into 2 parts? */
2107 if (bno + BTOBB(count) > log->l_logBBsize)
2108 xlog_split_iclog(log, &iclog->ic_header, bno, count);
2109
2110 /* calculcate the checksum */
2111 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2112 iclog->ic_datap, size);
2113 /*
2114 * Intentionally corrupt the log record CRC based on the error injection
2115 * frequency, if defined. This facilitates testing log recovery in the
2116 * event of torn writes. Hence, set the IOABORT state to abort the log
2117 * write on I/O completion and shutdown the fs. The subsequent mount
2118 * detects the bad CRC and attempts to recover.
2119 */
2120#ifdef DEBUG
2121 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2122 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2123 iclog->ic_fail_crc = true;
2124 xfs_warn(log->l_mp,
2125 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2126 be64_to_cpu(iclog->ic_header.h_lsn));
2127 }
2128#endif
2129 xlog_verify_iclog(log, iclog, count);
2130 xlog_write_iclog(log, iclog, bno, count);
2131}
2132
2133/*
2134 * Deallocate a log structure
2135 */
2136STATIC void
2137xlog_dealloc_log(
2138 struct xlog *log)
2139{
2140 xlog_in_core_t *iclog, *next_iclog;
2141 int i;
2142
2143 /*
2144 * Destroy the CIL after waiting for iclog IO completion because an
2145 * iclog EIO error will try to shut down the log, which accesses the
2146 * CIL to wake up the waiters.
2147 */
2148 xlog_cil_destroy(log);
2149
2150 iclog = log->l_iclog;
2151 for (i = 0; i < log->l_iclog_bufs; i++) {
2152 next_iclog = iclog->ic_next;
2153 kmem_free(iclog->ic_data);
2154 kmem_free(iclog);
2155 iclog = next_iclog;
2156 }
2157
2158 log->l_mp->m_log = NULL;
2159 destroy_workqueue(log->l_ioend_workqueue);
2160 kmem_free(log);
2161}
2162
2163/*
2164 * Update counters atomically now that memcpy is done.
2165 */
2166static inline void
2167xlog_state_finish_copy(
2168 struct xlog *log,
2169 struct xlog_in_core *iclog,
2170 int record_cnt,
2171 int copy_bytes)
2172{
2173 lockdep_assert_held(&log->l_icloglock);
2174
2175 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2176 iclog->ic_offset += copy_bytes;
2177}
2178
2179/*
2180 * print out info relating to regions written which consume
2181 * the reservation
2182 */
2183void
2184xlog_print_tic_res(
2185 struct xfs_mount *mp,
2186 struct xlog_ticket *ticket)
2187{
2188 xfs_warn(mp, "ticket reservation summary:");
2189 xfs_warn(mp, " unit res = %d bytes", ticket->t_unit_res);
2190 xfs_warn(mp, " current res = %d bytes", ticket->t_curr_res);
2191 xfs_warn(mp, " original count = %d", ticket->t_ocnt);
2192 xfs_warn(mp, " remaining count = %d", ticket->t_cnt);
2193}
2194
2195/*
2196 * Print a summary of the transaction.
2197 */
2198void
2199xlog_print_trans(
2200 struct xfs_trans *tp)
2201{
2202 struct xfs_mount *mp = tp->t_mountp;
2203 struct xfs_log_item *lip;
2204
2205 /* dump core transaction and ticket info */
2206 xfs_warn(mp, "transaction summary:");
2207 xfs_warn(mp, " log res = %d", tp->t_log_res);
2208 xfs_warn(mp, " log count = %d", tp->t_log_count);
2209 xfs_warn(mp, " flags = 0x%x", tp->t_flags);
2210
2211 xlog_print_tic_res(mp, tp->t_ticket);
2212
2213 /* dump each log item */
2214 list_for_each_entry(lip, &tp->t_items, li_trans) {
2215 struct xfs_log_vec *lv = lip->li_lv;
2216 struct xfs_log_iovec *vec;
2217 int i;
2218
2219 xfs_warn(mp, "log item: ");
2220 xfs_warn(mp, " type = 0x%x", lip->li_type);
2221 xfs_warn(mp, " flags = 0x%lx", lip->li_flags);
2222 if (!lv)
2223 continue;
2224 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
2225 xfs_warn(mp, " size = %d", lv->lv_size);
2226 xfs_warn(mp, " bytes = %d", lv->lv_bytes);
2227 xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
2228
2229 /* dump each iovec for the log item */
2230 vec = lv->lv_iovecp;
2231 for (i = 0; i < lv->lv_niovecs; i++) {
2232 int dumplen = min(vec->i_len, 32);
2233
2234 xfs_warn(mp, " iovec[%d]", i);
2235 xfs_warn(mp, " type = 0x%x", vec->i_type);
2236 xfs_warn(mp, " len = %d", vec->i_len);
2237 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
2238 xfs_hex_dump(vec->i_addr, dumplen);
2239
2240 vec++;
2241 }
2242 }
2243}
2244
2245static inline void
2246xlog_write_iovec(
2247 struct xlog_in_core *iclog,
2248 uint32_t *log_offset,
2249 void *data,
2250 uint32_t write_len,
2251 int *bytes_left,
2252 uint32_t *record_cnt,
2253 uint32_t *data_cnt)
2254{
2255 ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2256 ASSERT(*log_offset % sizeof(int32_t) == 0);
2257 ASSERT(write_len % sizeof(int32_t) == 0);
2258
2259 memcpy(iclog->ic_datap + *log_offset, data, write_len);
2260 *log_offset += write_len;
2261 *bytes_left -= write_len;
2262 (*record_cnt)++;
2263 *data_cnt += write_len;
2264}
2265
2266/*
2267 * Write log vectors into a single iclog which is guaranteed by the caller
2268 * to have enough space to write the entire log vector into.
2269 */
2270static void
2271xlog_write_full(
2272 struct xfs_log_vec *lv,
2273 struct xlog_ticket *ticket,
2274 struct xlog_in_core *iclog,
2275 uint32_t *log_offset,
2276 uint32_t *len,
2277 uint32_t *record_cnt,
2278 uint32_t *data_cnt)
2279{
2280 int index;
2281
2282 ASSERT(*log_offset + *len <= iclog->ic_size ||
2283 iclog->ic_state == XLOG_STATE_WANT_SYNC);
2284
2285 /*
2286 * Ordered log vectors have no regions to write so this
2287 * loop will naturally skip them.
2288 */
2289 for (index = 0; index < lv->lv_niovecs; index++) {
2290 struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2291 struct xlog_op_header *ophdr = reg->i_addr;
2292
2293 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2294 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2295 reg->i_len, len, record_cnt, data_cnt);
2296 }
2297}
2298
2299static int
2300xlog_write_get_more_iclog_space(
2301 struct xlog_ticket *ticket,
2302 struct xlog_in_core **iclogp,
2303 uint32_t *log_offset,
2304 uint32_t len,
2305 uint32_t *record_cnt,
2306 uint32_t *data_cnt)
2307{
2308 struct xlog_in_core *iclog = *iclogp;
2309 struct xlog *log = iclog->ic_log;
2310 int error;
2311
2312 spin_lock(&log->l_icloglock);
2313 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2314 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2315 error = xlog_state_release_iclog(log, iclog, ticket);
2316 spin_unlock(&log->l_icloglock);
2317 if (error)
2318 return error;
2319
2320 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2321 log_offset);
2322 if (error)
2323 return error;
2324 *record_cnt = 0;
2325 *data_cnt = 0;
2326 *iclogp = iclog;
2327 return 0;
2328}
2329
2330/*
2331 * Write log vectors into a single iclog which is smaller than the current chain
2332 * length. We write until we cannot fit a full record into the remaining space
2333 * and then stop. We return the log vector that is to be written that cannot
2334 * wholly fit in the iclog.
2335 */
2336static int
2337xlog_write_partial(
2338 struct xfs_log_vec *lv,
2339 struct xlog_ticket *ticket,
2340 struct xlog_in_core **iclogp,
2341 uint32_t *log_offset,
2342 uint32_t *len,
2343 uint32_t *record_cnt,
2344 uint32_t *data_cnt)
2345{
2346 struct xlog_in_core *iclog = *iclogp;
2347 struct xlog_op_header *ophdr;
2348 int index = 0;
2349 uint32_t rlen;
2350 int error;
2351
2352 /* walk the logvec, copying until we run out of space in the iclog */
2353 for (index = 0; index < lv->lv_niovecs; index++) {
2354 struct xfs_log_iovec *reg = &lv->lv_iovecp[index];
2355 uint32_t reg_offset = 0;
2356
2357 /*
2358 * The first region of a continuation must have a non-zero
2359 * length otherwise log recovery will just skip over it and
2360 * start recovering from the next opheader it finds. Because we
2361 * mark the next opheader as a continuation, recovery will then
2362 * incorrectly add the continuation to the previous region and
2363 * that breaks stuff.
2364 *
2365 * Hence if there isn't space for region data after the
2366 * opheader, then we need to start afresh with a new iclog.
2367 */
2368 if (iclog->ic_size - *log_offset <=
2369 sizeof(struct xlog_op_header)) {
2370 error = xlog_write_get_more_iclog_space(ticket,
2371 &iclog, log_offset, *len, record_cnt,
2372 data_cnt);
2373 if (error)
2374 return error;
2375 }
2376
2377 ophdr = reg->i_addr;
2378 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2379
2380 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2381 ophdr->oh_len = cpu_to_be32(rlen - sizeof(struct xlog_op_header));
2382 if (rlen != reg->i_len)
2383 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2384
2385 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2386 rlen, len, record_cnt, data_cnt);
2387
2388 /* If we wrote the whole region, move to the next. */
2389 if (rlen == reg->i_len)
2390 continue;
2391
2392 /*
2393 * We now have a partially written iovec, but it can span
2394 * multiple iclogs so we loop here. First we release the iclog
2395 * we currently have, then we get a new iclog and add a new
2396 * opheader. Then we continue copying from where we were until
2397 * we either complete the iovec or fill the iclog. If we
2398 * complete the iovec, then we increment the index and go right
2399 * back to the top of the outer loop. if we fill the iclog, we
2400 * run the inner loop again.
2401 *
2402 * This is complicated by the tail of a region using all the
2403 * space in an iclog and hence requiring us to release the iclog
2404 * and get a new one before returning to the outer loop. We must
2405 * always guarantee that we exit this inner loop with at least
2406 * space for log transaction opheaders left in the current
2407 * iclog, hence we cannot just terminate the loop at the end
2408 * of the of the continuation. So we loop while there is no
2409 * space left in the current iclog, and check for the end of the
2410 * continuation after getting a new iclog.
2411 */
2412 do {
2413 /*
2414 * Ensure we include the continuation opheader in the
2415 * space we need in the new iclog by adding that size
2416 * to the length we require. This continuation opheader
2417 * needs to be accounted to the ticket as the space it
2418 * consumes hasn't been accounted to the lv we are
2419 * writing.
2420 */
2421 error = xlog_write_get_more_iclog_space(ticket,
2422 &iclog, log_offset,
2423 *len + sizeof(struct xlog_op_header),
2424 record_cnt, data_cnt);
2425 if (error)
2426 return error;
2427
2428 ophdr = iclog->ic_datap + *log_offset;
2429 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2430 ophdr->oh_clientid = XFS_TRANSACTION;
2431 ophdr->oh_res2 = 0;
2432 ophdr->oh_flags = XLOG_WAS_CONT_TRANS;
2433
2434 ticket->t_curr_res -= sizeof(struct xlog_op_header);
2435 *log_offset += sizeof(struct xlog_op_header);
2436 *data_cnt += sizeof(struct xlog_op_header);
2437
2438 /*
2439 * If rlen fits in the iclog, then end the region
2440 * continuation. Otherwise we're going around again.
2441 */
2442 reg_offset += rlen;
2443 rlen = reg->i_len - reg_offset;
2444 if (rlen <= iclog->ic_size - *log_offset)
2445 ophdr->oh_flags |= XLOG_END_TRANS;
2446 else
2447 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2448
2449 rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2450 ophdr->oh_len = cpu_to_be32(rlen);
2451
2452 xlog_write_iovec(iclog, log_offset,
2453 reg->i_addr + reg_offset,
2454 rlen, len, record_cnt, data_cnt);
2455
2456 } while (ophdr->oh_flags & XLOG_CONTINUE_TRANS);
2457 }
2458
2459 /*
2460 * No more iovecs remain in this logvec so return the next log vec to
2461 * the caller so it can go back to fast path copying.
2462 */
2463 *iclogp = iclog;
2464 return 0;
2465}
2466
2467/*
2468 * Write some region out to in-core log
2469 *
2470 * This will be called when writing externally provided regions or when
2471 * writing out a commit record for a given transaction.
2472 *
2473 * General algorithm:
2474 * 1. Find total length of this write. This may include adding to the
2475 * lengths passed in.
2476 * 2. Check whether we violate the tickets reservation.
2477 * 3. While writing to this iclog
2478 * A. Reserve as much space in this iclog as can get
2479 * B. If this is first write, save away start lsn
2480 * C. While writing this region:
2481 * 1. If first write of transaction, write start record
2482 * 2. Write log operation header (header per region)
2483 * 3. Find out if we can fit entire region into this iclog
2484 * 4. Potentially, verify destination memcpy ptr
2485 * 5. Memcpy (partial) region
2486 * 6. If partial copy, release iclog; otherwise, continue
2487 * copying more regions into current iclog
2488 * 4. Mark want sync bit (in simulation mode)
2489 * 5. Release iclog for potential flush to on-disk log.
2490 *
2491 * ERRORS:
2492 * 1. Panic if reservation is overrun. This should never happen since
2493 * reservation amounts are generated internal to the filesystem.
2494 * NOTES:
2495 * 1. Tickets are single threaded data structures.
2496 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2497 * syncing routine. When a single log_write region needs to span
2498 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2499 * on all log operation writes which don't contain the end of the
2500 * region. The XLOG_END_TRANS bit is used for the in-core log
2501 * operation which contains the end of the continued log_write region.
2502 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2503 * we don't really know exactly how much space will be used. As a result,
2504 * we don't update ic_offset until the end when we know exactly how many
2505 * bytes have been written out.
2506 */
2507int
2508xlog_write(
2509 struct xlog *log,
2510 struct xfs_cil_ctx *ctx,
2511 struct list_head *lv_chain,
2512 struct xlog_ticket *ticket,
2513 uint32_t len)
2514
2515{
2516 struct xlog_in_core *iclog = NULL;
2517 struct xfs_log_vec *lv;
2518 uint32_t record_cnt = 0;
2519 uint32_t data_cnt = 0;
2520 int error = 0;
2521 int log_offset;
2522
2523 if (ticket->t_curr_res < 0) {
2524 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2525 "ctx ticket reservation ran out. Need to up reservation");
2526 xlog_print_tic_res(log->l_mp, ticket);
2527 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2528 }
2529
2530 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2531 &log_offset);
2532 if (error)
2533 return error;
2534
2535 ASSERT(log_offset <= iclog->ic_size - 1);
2536
2537 /*
2538 * If we have a context pointer, pass it the first iclog we are
2539 * writing to so it can record state needed for iclog write
2540 * ordering.
2541 */
2542 if (ctx)
2543 xlog_cil_set_ctx_write_state(ctx, iclog);
2544
2545 list_for_each_entry(lv, lv_chain, lv_list) {
2546 /*
2547 * If the entire log vec does not fit in the iclog, punt it to
2548 * the partial copy loop which can handle this case.
2549 */
2550 if (lv->lv_niovecs &&
2551 lv->lv_bytes > iclog->ic_size - log_offset) {
2552 error = xlog_write_partial(lv, ticket, &iclog,
2553 &log_offset, &len, &record_cnt,
2554 &data_cnt);
2555 if (error) {
2556 /*
2557 * We have no iclog to release, so just return
2558 * the error immediately.
2559 */
2560 return error;
2561 }
2562 } else {
2563 xlog_write_full(lv, ticket, iclog, &log_offset,
2564 &len, &record_cnt, &data_cnt);
2565 }
2566 }
2567 ASSERT(len == 0);
2568
2569 /*
2570 * We've already been guaranteed that the last writes will fit inside
2571 * the current iclog, and hence it will already have the space used by
2572 * those writes accounted to it. Hence we do not need to update the
2573 * iclog with the number of bytes written here.
2574 */
2575 spin_lock(&log->l_icloglock);
2576 xlog_state_finish_copy(log, iclog, record_cnt, 0);
2577 error = xlog_state_release_iclog(log, iclog, ticket);
2578 spin_unlock(&log->l_icloglock);
2579
2580 return error;
2581}
2582
2583static void
2584xlog_state_activate_iclog(
2585 struct xlog_in_core *iclog,
2586 int *iclogs_changed)
2587{
2588 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2589 trace_xlog_iclog_activate(iclog, _RET_IP_);
2590
2591 /*
2592 * If the number of ops in this iclog indicate it just contains the
2593 * dummy transaction, we can change state into IDLE (the second time
2594 * around). Otherwise we should change the state into NEED a dummy.
2595 * We don't need to cover the dummy.
2596 */
2597 if (*iclogs_changed == 0 &&
2598 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2599 *iclogs_changed = 1;
2600 } else {
2601 /*
2602 * We have two dirty iclogs so start over. This could also be
2603 * num of ops indicating this is not the dummy going out.
2604 */
2605 *iclogs_changed = 2;
2606 }
2607
2608 iclog->ic_state = XLOG_STATE_ACTIVE;
2609 iclog->ic_offset = 0;
2610 iclog->ic_header.h_num_logops = 0;
2611 memset(iclog->ic_header.h_cycle_data, 0,
2612 sizeof(iclog->ic_header.h_cycle_data));
2613 iclog->ic_header.h_lsn = 0;
2614 iclog->ic_header.h_tail_lsn = 0;
2615}
2616
2617/*
2618 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2619 * ACTIVE after iclog I/O has completed.
2620 */
2621static void
2622xlog_state_activate_iclogs(
2623 struct xlog *log,
2624 int *iclogs_changed)
2625{
2626 struct xlog_in_core *iclog = log->l_iclog;
2627
2628 do {
2629 if (iclog->ic_state == XLOG_STATE_DIRTY)
2630 xlog_state_activate_iclog(iclog, iclogs_changed);
2631 /*
2632 * The ordering of marking iclogs ACTIVE must be maintained, so
2633 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2634 */
2635 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2636 break;
2637 } while ((iclog = iclog->ic_next) != log->l_iclog);
2638}
2639
2640static int
2641xlog_covered_state(
2642 int prev_state,
2643 int iclogs_changed)
2644{
2645 /*
2646 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2647 * wrote the first covering record (DONE). We go to IDLE if we just
2648 * wrote the second covering record (DONE2) and remain in IDLE until a
2649 * non-covering write occurs.
2650 */
2651 switch (prev_state) {
2652 case XLOG_STATE_COVER_IDLE:
2653 if (iclogs_changed == 1)
2654 return XLOG_STATE_COVER_IDLE;
2655 fallthrough;
2656 case XLOG_STATE_COVER_NEED:
2657 case XLOG_STATE_COVER_NEED2:
2658 break;
2659 case XLOG_STATE_COVER_DONE:
2660 if (iclogs_changed == 1)
2661 return XLOG_STATE_COVER_NEED2;
2662 break;
2663 case XLOG_STATE_COVER_DONE2:
2664 if (iclogs_changed == 1)
2665 return XLOG_STATE_COVER_IDLE;
2666 break;
2667 default:
2668 ASSERT(0);
2669 }
2670
2671 return XLOG_STATE_COVER_NEED;
2672}
2673
2674STATIC void
2675xlog_state_clean_iclog(
2676 struct xlog *log,
2677 struct xlog_in_core *dirty_iclog)
2678{
2679 int iclogs_changed = 0;
2680
2681 trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2682
2683 dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2684
2685 xlog_state_activate_iclogs(log, &iclogs_changed);
2686 wake_up_all(&dirty_iclog->ic_force_wait);
2687
2688 if (iclogs_changed) {
2689 log->l_covered_state = xlog_covered_state(log->l_covered_state,
2690 iclogs_changed);
2691 }
2692}
2693
2694STATIC xfs_lsn_t
2695xlog_get_lowest_lsn(
2696 struct xlog *log)
2697{
2698 struct xlog_in_core *iclog = log->l_iclog;
2699 xfs_lsn_t lowest_lsn = 0, lsn;
2700
2701 do {
2702 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2703 iclog->ic_state == XLOG_STATE_DIRTY)
2704 continue;
2705
2706 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2707 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2708 lowest_lsn = lsn;
2709 } while ((iclog = iclog->ic_next) != log->l_iclog);
2710
2711 return lowest_lsn;
2712}
2713
2714/*
2715 * Completion of a iclog IO does not imply that a transaction has completed, as
2716 * transactions can be large enough to span many iclogs. We cannot change the
2717 * tail of the log half way through a transaction as this may be the only
2718 * transaction in the log and moving the tail to point to the middle of it
2719 * will prevent recovery from finding the start of the transaction. Hence we
2720 * should only update the last_sync_lsn if this iclog contains transaction
2721 * completion callbacks on it.
2722 *
2723 * We have to do this before we drop the icloglock to ensure we are the only one
2724 * that can update it.
2725 *
2726 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2727 * the reservation grant head pushing. This is due to the fact that the push
2728 * target is bound by the current last_sync_lsn value. Hence if we have a large
2729 * amount of log space bound up in this committing transaction then the
2730 * last_sync_lsn value may be the limiting factor preventing tail pushing from
2731 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2732 * should push the AIL to ensure the push target (and hence the grant head) is
2733 * no longer bound by the old log head location and can move forwards and make
2734 * progress again.
2735 */
2736static void
2737xlog_state_set_callback(
2738 struct xlog *log,
2739 struct xlog_in_core *iclog,
2740 xfs_lsn_t header_lsn)
2741{
2742 trace_xlog_iclog_callback(iclog, _RET_IP_);
2743 iclog->ic_state = XLOG_STATE_CALLBACK;
2744
2745 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2746 header_lsn) <= 0);
2747
2748 if (list_empty_careful(&iclog->ic_callbacks))
2749 return;
2750
2751 atomic64_set(&log->l_last_sync_lsn, header_lsn);
2752 xlog_grant_push_ail(log, 0);
2753}
2754
2755/*
2756 * Return true if we need to stop processing, false to continue to the next
2757 * iclog. The caller will need to run callbacks if the iclog is returned in the
2758 * XLOG_STATE_CALLBACK state.
2759 */
2760static bool
2761xlog_state_iodone_process_iclog(
2762 struct xlog *log,
2763 struct xlog_in_core *iclog)
2764{
2765 xfs_lsn_t lowest_lsn;
2766 xfs_lsn_t header_lsn;
2767
2768 switch (iclog->ic_state) {
2769 case XLOG_STATE_ACTIVE:
2770 case XLOG_STATE_DIRTY:
2771 /*
2772 * Skip all iclogs in the ACTIVE & DIRTY states:
2773 */
2774 return false;
2775 case XLOG_STATE_DONE_SYNC:
2776 /*
2777 * Now that we have an iclog that is in the DONE_SYNC state, do
2778 * one more check here to see if we have chased our tail around.
2779 * If this is not the lowest lsn iclog, then we will leave it
2780 * for another completion to process.
2781 */
2782 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2783 lowest_lsn = xlog_get_lowest_lsn(log);
2784 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2785 return false;
2786 xlog_state_set_callback(log, iclog, header_lsn);
2787 return false;
2788 default:
2789 /*
2790 * Can only perform callbacks in order. Since this iclog is not
2791 * in the DONE_SYNC state, we skip the rest and just try to
2792 * clean up.
2793 */
2794 return true;
2795 }
2796}
2797
2798/*
2799 * Loop over all the iclogs, running attached callbacks on them. Return true if
2800 * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2801 * to handle transient shutdown state here at all because
2802 * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2803 * cleanup of the callbacks.
2804 */
2805static bool
2806xlog_state_do_iclog_callbacks(
2807 struct xlog *log)
2808 __releases(&log->l_icloglock)
2809 __acquires(&log->l_icloglock)
2810{
2811 struct xlog_in_core *first_iclog = log->l_iclog;
2812 struct xlog_in_core *iclog = first_iclog;
2813 bool ran_callback = false;
2814
2815 do {
2816 LIST_HEAD(cb_list);
2817
2818 if (xlog_state_iodone_process_iclog(log, iclog))
2819 break;
2820 if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2821 iclog = iclog->ic_next;
2822 continue;
2823 }
2824 list_splice_init(&iclog->ic_callbacks, &cb_list);
2825 spin_unlock(&log->l_icloglock);
2826
2827 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2828 xlog_cil_process_committed(&cb_list);
2829 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2830 ran_callback = true;
2831
2832 spin_lock(&log->l_icloglock);
2833 xlog_state_clean_iclog(log, iclog);
2834 iclog = iclog->ic_next;
2835 } while (iclog != first_iclog);
2836
2837 return ran_callback;
2838}
2839
2840
2841/*
2842 * Loop running iclog completion callbacks until there are no more iclogs in a
2843 * state that can run callbacks.
2844 */
2845STATIC void
2846xlog_state_do_callback(
2847 struct xlog *log)
2848{
2849 int flushcnt = 0;
2850 int repeats = 0;
2851
2852 spin_lock(&log->l_icloglock);
2853 while (xlog_state_do_iclog_callbacks(log)) {
2854 if (xlog_is_shutdown(log))
2855 break;
2856
2857 if (++repeats > 5000) {
2858 flushcnt += repeats;
2859 repeats = 0;
2860 xfs_warn(log->l_mp,
2861 "%s: possible infinite loop (%d iterations)",
2862 __func__, flushcnt);
2863 }
2864 }
2865
2866 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2867 wake_up_all(&log->l_flush_wait);
2868
2869 spin_unlock(&log->l_icloglock);
2870}
2871
2872
2873/*
2874 * Finish transitioning this iclog to the dirty state.
2875 *
2876 * Callbacks could take time, so they are done outside the scope of the
2877 * global state machine log lock.
2878 */
2879STATIC void
2880xlog_state_done_syncing(
2881 struct xlog_in_core *iclog)
2882{
2883 struct xlog *log = iclog->ic_log;
2884
2885 spin_lock(&log->l_icloglock);
2886 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2887 trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2888
2889 /*
2890 * If we got an error, either on the first buffer, or in the case of
2891 * split log writes, on the second, we shut down the file system and
2892 * no iclogs should ever be attempted to be written to disk again.
2893 */
2894 if (!xlog_is_shutdown(log)) {
2895 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2896 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2897 }
2898
2899 /*
2900 * Someone could be sleeping prior to writing out the next
2901 * iclog buffer, we wake them all, one will get to do the
2902 * I/O, the others get to wait for the result.
2903 */
2904 wake_up_all(&iclog->ic_write_wait);
2905 spin_unlock(&log->l_icloglock);
2906 xlog_state_do_callback(log);
2907}
2908
2909/*
2910 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2911 * sleep. We wait on the flush queue on the head iclog as that should be
2912 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2913 * we will wait here and all new writes will sleep until a sync completes.
2914 *
2915 * The in-core logs are used in a circular fashion. They are not used
2916 * out-of-order even when an iclog past the head is free.
2917 *
2918 * return:
2919 * * log_offset where xlog_write() can start writing into the in-core
2920 * log's data space.
2921 * * in-core log pointer to which xlog_write() should write.
2922 * * boolean indicating this is a continued write to an in-core log.
2923 * If this is the last write, then the in-core log's offset field
2924 * needs to be incremented, depending on the amount of data which
2925 * is copied.
2926 */
2927STATIC int
2928xlog_state_get_iclog_space(
2929 struct xlog *log,
2930 int len,
2931 struct xlog_in_core **iclogp,
2932 struct xlog_ticket *ticket,
2933 int *logoffsetp)
2934{
2935 int log_offset;
2936 xlog_rec_header_t *head;
2937 xlog_in_core_t *iclog;
2938
2939restart:
2940 spin_lock(&log->l_icloglock);
2941 if (xlog_is_shutdown(log)) {
2942 spin_unlock(&log->l_icloglock);
2943 return -EIO;
2944 }
2945
2946 iclog = log->l_iclog;
2947 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2948 XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2949
2950 /* Wait for log writes to have flushed */
2951 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2952 goto restart;
2953 }
2954
2955 head = &iclog->ic_header;
2956
2957 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2958 log_offset = iclog->ic_offset;
2959
2960 trace_xlog_iclog_get_space(iclog, _RET_IP_);
2961
2962 /* On the 1st write to an iclog, figure out lsn. This works
2963 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2964 * committing to. If the offset is set, that's how many blocks
2965 * must be written.
2966 */
2967 if (log_offset == 0) {
2968 ticket->t_curr_res -= log->l_iclog_hsize;
2969 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2970 head->h_lsn = cpu_to_be64(
2971 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2972 ASSERT(log->l_curr_block >= 0);
2973 }
2974
2975 /* If there is enough room to write everything, then do it. Otherwise,
2976 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2977 * bit is on, so this will get flushed out. Don't update ic_offset
2978 * until you know exactly how many bytes get copied. Therefore, wait
2979 * until later to update ic_offset.
2980 *
2981 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2982 * can fit into remaining data section.
2983 */
2984 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2985 int error = 0;
2986
2987 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2988
2989 /*
2990 * If we are the only one writing to this iclog, sync it to
2991 * disk. We need to do an atomic compare and decrement here to
2992 * avoid racing with concurrent atomic_dec_and_lock() calls in
2993 * xlog_state_release_iclog() when there is more than one
2994 * reference to the iclog.
2995 */
2996 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2997 error = xlog_state_release_iclog(log, iclog, ticket);
2998 spin_unlock(&log->l_icloglock);
2999 if (error)
3000 return error;
3001 goto restart;
3002 }
3003
3004 /* Do we have enough room to write the full amount in the remainder
3005 * of this iclog? Or must we continue a write on the next iclog and
3006 * mark this iclog as completely taken? In the case where we switch
3007 * iclogs (to mark it taken), this particular iclog will release/sync
3008 * to disk in xlog_write().
3009 */
3010 if (len <= iclog->ic_size - iclog->ic_offset)
3011 iclog->ic_offset += len;
3012 else
3013 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3014 *iclogp = iclog;
3015
3016 ASSERT(iclog->ic_offset <= iclog->ic_size);
3017 spin_unlock(&log->l_icloglock);
3018
3019 *logoffsetp = log_offset;
3020 return 0;
3021}
3022
3023/*
3024 * The first cnt-1 times a ticket goes through here we don't need to move the
3025 * grant write head because the permanent reservation has reserved cnt times the
3026 * unit amount. Release part of current permanent unit reservation and reset
3027 * current reservation to be one units worth. Also move grant reservation head
3028 * forward.
3029 */
3030void
3031xfs_log_ticket_regrant(
3032 struct xlog *log,
3033 struct xlog_ticket *ticket)
3034{
3035 trace_xfs_log_ticket_regrant(log, ticket);
3036
3037 if (ticket->t_cnt > 0)
3038 ticket->t_cnt--;
3039
3040 xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3041 ticket->t_curr_res);
3042 xlog_grant_sub_space(log, &log->l_write_head.grant,
3043 ticket->t_curr_res);
3044 ticket->t_curr_res = ticket->t_unit_res;
3045
3046 trace_xfs_log_ticket_regrant_sub(log, ticket);
3047
3048 /* just return if we still have some of the pre-reserved space */
3049 if (!ticket->t_cnt) {
3050 xlog_grant_add_space(log, &log->l_reserve_head.grant,
3051 ticket->t_unit_res);
3052 trace_xfs_log_ticket_regrant_exit(log, ticket);
3053
3054 ticket->t_curr_res = ticket->t_unit_res;
3055 }
3056
3057 xfs_log_ticket_put(ticket);
3058}
3059
3060/*
3061 * Give back the space left from a reservation.
3062 *
3063 * All the information we need to make a correct determination of space left
3064 * is present. For non-permanent reservations, things are quite easy. The
3065 * count should have been decremented to zero. We only need to deal with the
3066 * space remaining in the current reservation part of the ticket. If the
3067 * ticket contains a permanent reservation, there may be left over space which
3068 * needs to be released. A count of N means that N-1 refills of the current
3069 * reservation can be done before we need to ask for more space. The first
3070 * one goes to fill up the first current reservation. Once we run out of
3071 * space, the count will stay at zero and the only space remaining will be
3072 * in the current reservation field.
3073 */
3074void
3075xfs_log_ticket_ungrant(
3076 struct xlog *log,
3077 struct xlog_ticket *ticket)
3078{
3079 int bytes;
3080
3081 trace_xfs_log_ticket_ungrant(log, ticket);
3082
3083 if (ticket->t_cnt > 0)
3084 ticket->t_cnt--;
3085
3086 trace_xfs_log_ticket_ungrant_sub(log, ticket);
3087
3088 /*
3089 * If this is a permanent reservation ticket, we may be able to free
3090 * up more space based on the remaining count.
3091 */
3092 bytes = ticket->t_curr_res;
3093 if (ticket->t_cnt > 0) {
3094 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3095 bytes += ticket->t_unit_res*ticket->t_cnt;
3096 }
3097
3098 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3099 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3100
3101 trace_xfs_log_ticket_ungrant_exit(log, ticket);
3102
3103 xfs_log_space_wake(log->l_mp);
3104 xfs_log_ticket_put(ticket);
3105}
3106
3107/*
3108 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3109 * the current iclog pointer to the next iclog in the ring.
3110 */
3111void
3112xlog_state_switch_iclogs(
3113 struct xlog *log,
3114 struct xlog_in_core *iclog,
3115 int eventual_size)
3116{
3117 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3118 assert_spin_locked(&log->l_icloglock);
3119 trace_xlog_iclog_switch(iclog, _RET_IP_);
3120
3121 if (!eventual_size)
3122 eventual_size = iclog->ic_offset;
3123 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3124 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3125 log->l_prev_block = log->l_curr_block;
3126 log->l_prev_cycle = log->l_curr_cycle;
3127
3128 /* roll log?: ic_offset changed later */
3129 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3130
3131 /* Round up to next log-sunit */
3132 if (log->l_iclog_roundoff > BBSIZE) {
3133 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3134 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3135 }
3136
3137 if (log->l_curr_block >= log->l_logBBsize) {
3138 /*
3139 * Rewind the current block before the cycle is bumped to make
3140 * sure that the combined LSN never transiently moves forward
3141 * when the log wraps to the next cycle. This is to support the
3142 * unlocked sample of these fields from xlog_valid_lsn(). Most
3143 * other cases should acquire l_icloglock.
3144 */
3145 log->l_curr_block -= log->l_logBBsize;
3146 ASSERT(log->l_curr_block >= 0);
3147 smp_wmb();
3148 log->l_curr_cycle++;
3149 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3150 log->l_curr_cycle++;
3151 }
3152 ASSERT(iclog == log->l_iclog);
3153 log->l_iclog = iclog->ic_next;
3154}
3155
3156/*
3157 * Force the iclog to disk and check if the iclog has been completed before
3158 * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3159 * pmem) or fast async storage because we drop the icloglock to issue the IO.
3160 * If completion has already occurred, tell the caller so that it can avoid an
3161 * unnecessary wait on the iclog.
3162 */
3163static int
3164xlog_force_and_check_iclog(
3165 struct xlog_in_core *iclog,
3166 bool *completed)
3167{
3168 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3169 int error;
3170
3171 *completed = false;
3172 error = xlog_force_iclog(iclog);
3173 if (error)
3174 return error;
3175
3176 /*
3177 * If the iclog has already been completed and reused the header LSN
3178 * will have been rewritten by completion
3179 */
3180 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3181 *completed = true;
3182 return 0;
3183}
3184
3185/*
3186 * Write out all data in the in-core log as of this exact moment in time.
3187 *
3188 * Data may be written to the in-core log during this call. However,
3189 * we don't guarantee this data will be written out. A change from past
3190 * implementation means this routine will *not* write out zero length LRs.
3191 *
3192 * Basically, we try and perform an intelligent scan of the in-core logs.
3193 * If we determine there is no flushable data, we just return. There is no
3194 * flushable data if:
3195 *
3196 * 1. the current iclog is active and has no data; the previous iclog
3197 * is in the active or dirty state.
3198 * 2. the current iclog is drity, and the previous iclog is in the
3199 * active or dirty state.
3200 *
3201 * We may sleep if:
3202 *
3203 * 1. the current iclog is not in the active nor dirty state.
3204 * 2. the current iclog dirty, and the previous iclog is not in the
3205 * active nor dirty state.
3206 * 3. the current iclog is active, and there is another thread writing
3207 * to this particular iclog.
3208 * 4. a) the current iclog is active and has no other writers
3209 * b) when we return from flushing out this iclog, it is still
3210 * not in the active nor dirty state.
3211 */
3212int
3213xfs_log_force(
3214 struct xfs_mount *mp,
3215 uint flags)
3216{
3217 struct xlog *log = mp->m_log;
3218 struct xlog_in_core *iclog;
3219
3220 XFS_STATS_INC(mp, xs_log_force);
3221 trace_xfs_log_force(mp, 0, _RET_IP_);
3222
3223 xlog_cil_force(log);
3224
3225 spin_lock(&log->l_icloglock);
3226 if (xlog_is_shutdown(log))
3227 goto out_error;
3228
3229 iclog = log->l_iclog;
3230 trace_xlog_iclog_force(iclog, _RET_IP_);
3231
3232 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3233 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3234 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3235 /*
3236 * If the head is dirty or (active and empty), then we need to
3237 * look at the previous iclog.
3238 *
3239 * If the previous iclog is active or dirty we are done. There
3240 * is nothing to sync out. Otherwise, we attach ourselves to the
3241 * previous iclog and go to sleep.
3242 */
3243 iclog = iclog->ic_prev;
3244 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3245 if (atomic_read(&iclog->ic_refcnt) == 0) {
3246 /* We have exclusive access to this iclog. */
3247 bool completed;
3248
3249 if (xlog_force_and_check_iclog(iclog, &completed))
3250 goto out_error;
3251
3252 if (completed)
3253 goto out_unlock;
3254 } else {
3255 /*
3256 * Someone else is still writing to this iclog, so we
3257 * need to ensure that when they release the iclog it
3258 * gets synced immediately as we may be waiting on it.
3259 */
3260 xlog_state_switch_iclogs(log, iclog, 0);
3261 }
3262 }
3263
3264 /*
3265 * The iclog we are about to wait on may contain the checkpoint pushed
3266 * by the above xlog_cil_force() call, but it may not have been pushed
3267 * to disk yet. Like the ACTIVE case above, we need to make sure caches
3268 * are flushed when this iclog is written.
3269 */
3270 if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3271 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3272
3273 if (flags & XFS_LOG_SYNC)
3274 return xlog_wait_on_iclog(iclog);
3275out_unlock:
3276 spin_unlock(&log->l_icloglock);
3277 return 0;
3278out_error:
3279 spin_unlock(&log->l_icloglock);
3280 return -EIO;
3281}
3282
3283/*
3284 * Force the log to a specific LSN.
3285 *
3286 * If an iclog with that lsn can be found:
3287 * If it is in the DIRTY state, just return.
3288 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3289 * state and go to sleep or return.
3290 * If it is in any other state, go to sleep or return.
3291 *
3292 * Synchronous forces are implemented with a wait queue. All callers trying
3293 * to force a given lsn to disk must wait on the queue attached to the
3294 * specific in-core log. When given in-core log finally completes its write
3295 * to disk, that thread will wake up all threads waiting on the queue.
3296 */
3297static int
3298xlog_force_lsn(
3299 struct xlog *log,
3300 xfs_lsn_t lsn,
3301 uint flags,
3302 int *log_flushed,
3303 bool already_slept)
3304{
3305 struct xlog_in_core *iclog;
3306 bool completed;
3307
3308 spin_lock(&log->l_icloglock);
3309 if (xlog_is_shutdown(log))
3310 goto out_error;
3311
3312 iclog = log->l_iclog;
3313 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3314 trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3315 iclog = iclog->ic_next;
3316 if (iclog == log->l_iclog)
3317 goto out_unlock;
3318 }
3319
3320 switch (iclog->ic_state) {
3321 case XLOG_STATE_ACTIVE:
3322 /*
3323 * We sleep here if we haven't already slept (e.g. this is the
3324 * first time we've looked at the correct iclog buf) and the
3325 * buffer before us is going to be sync'ed. The reason for this
3326 * is that if we are doing sync transactions here, by waiting
3327 * for the previous I/O to complete, we can allow a few more
3328 * transactions into this iclog before we close it down.
3329 *
3330 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3331 * refcnt so we can release the log (which drops the ref count).
3332 * The state switch keeps new transaction commits from using
3333 * this buffer. When the current commits finish writing into
3334 * the buffer, the refcount will drop to zero and the buffer
3335 * will go out then.
3336 */
3337 if (!already_slept &&
3338 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3339 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3340 xlog_wait(&iclog->ic_prev->ic_write_wait,
3341 &log->l_icloglock);
3342 return -EAGAIN;
3343 }
3344 if (xlog_force_and_check_iclog(iclog, &completed))
3345 goto out_error;
3346 if (log_flushed)
3347 *log_flushed = 1;
3348 if (completed)
3349 goto out_unlock;
3350 break;
3351 case XLOG_STATE_WANT_SYNC:
3352 /*
3353 * This iclog may contain the checkpoint pushed by the
3354 * xlog_cil_force_seq() call, but there are other writers still
3355 * accessing it so it hasn't been pushed to disk yet. Like the
3356 * ACTIVE case above, we need to make sure caches are flushed
3357 * when this iclog is written.
3358 */
3359 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3360 break;
3361 default:
3362 /*
3363 * The entire checkpoint was written by the CIL force and is on
3364 * its way to disk already. It will be stable when it
3365 * completes, so we don't need to manipulate caches here at all.
3366 * We just need to wait for completion if necessary.
3367 */
3368 break;
3369 }
3370
3371 if (flags & XFS_LOG_SYNC)
3372 return xlog_wait_on_iclog(iclog);
3373out_unlock:
3374 spin_unlock(&log->l_icloglock);
3375 return 0;
3376out_error:
3377 spin_unlock(&log->l_icloglock);
3378 return -EIO;
3379}
3380
3381/*
3382 * Force the log to a specific checkpoint sequence.
3383 *
3384 * First force the CIL so that all the required changes have been flushed to the
3385 * iclogs. If the CIL force completed it will return a commit LSN that indicates
3386 * the iclog that needs to be flushed to stable storage. If the caller needs
3387 * a synchronous log force, we will wait on the iclog with the LSN returned by
3388 * xlog_cil_force_seq() to be completed.
3389 */
3390int
3391xfs_log_force_seq(
3392 struct xfs_mount *mp,
3393 xfs_csn_t seq,
3394 uint flags,
3395 int *log_flushed)
3396{
3397 struct xlog *log = mp->m_log;
3398 xfs_lsn_t lsn;
3399 int ret;
3400 ASSERT(seq != 0);
3401
3402 XFS_STATS_INC(mp, xs_log_force);
3403 trace_xfs_log_force(mp, seq, _RET_IP_);
3404
3405 lsn = xlog_cil_force_seq(log, seq);
3406 if (lsn == NULLCOMMITLSN)
3407 return 0;
3408
3409 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3410 if (ret == -EAGAIN) {
3411 XFS_STATS_INC(mp, xs_log_force_sleep);
3412 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3413 }
3414 return ret;
3415}
3416
3417/*
3418 * Free a used ticket when its refcount falls to zero.
3419 */
3420void
3421xfs_log_ticket_put(
3422 xlog_ticket_t *ticket)
3423{
3424 ASSERT(atomic_read(&ticket->t_ref) > 0);
3425 if (atomic_dec_and_test(&ticket->t_ref))
3426 kmem_cache_free(xfs_log_ticket_cache, ticket);
3427}
3428
3429xlog_ticket_t *
3430xfs_log_ticket_get(
3431 xlog_ticket_t *ticket)
3432{
3433 ASSERT(atomic_read(&ticket->t_ref) > 0);
3434 atomic_inc(&ticket->t_ref);
3435 return ticket;
3436}
3437
3438/*
3439 * Figure out the total log space unit (in bytes) that would be
3440 * required for a log ticket.
3441 */
3442static int
3443xlog_calc_unit_res(
3444 struct xlog *log,
3445 int unit_bytes,
3446 int *niclogs)
3447{
3448 int iclog_space;
3449 uint num_headers;
3450
3451 /*
3452 * Permanent reservations have up to 'cnt'-1 active log operations
3453 * in the log. A unit in this case is the amount of space for one
3454 * of these log operations. Normal reservations have a cnt of 1
3455 * and their unit amount is the total amount of space required.
3456 *
3457 * The following lines of code account for non-transaction data
3458 * which occupy space in the on-disk log.
3459 *
3460 * Normal form of a transaction is:
3461 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3462 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3463 *
3464 * We need to account for all the leadup data and trailer data
3465 * around the transaction data.
3466 * And then we need to account for the worst case in terms of using
3467 * more space.
3468 * The worst case will happen if:
3469 * - the placement of the transaction happens to be such that the
3470 * roundoff is at its maximum
3471 * - the transaction data is synced before the commit record is synced
3472 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3473 * Therefore the commit record is in its own Log Record.
3474 * This can happen as the commit record is called with its
3475 * own region to xlog_write().
3476 * This then means that in the worst case, roundoff can happen for
3477 * the commit-rec as well.
3478 * The commit-rec is smaller than padding in this scenario and so it is
3479 * not added separately.
3480 */
3481
3482 /* for trans header */
3483 unit_bytes += sizeof(xlog_op_header_t);
3484 unit_bytes += sizeof(xfs_trans_header_t);
3485
3486 /* for start-rec */
3487 unit_bytes += sizeof(xlog_op_header_t);
3488
3489 /*
3490 * for LR headers - the space for data in an iclog is the size minus
3491 * the space used for the headers. If we use the iclog size, then we
3492 * undercalculate the number of headers required.
3493 *
3494 * Furthermore - the addition of op headers for split-recs might
3495 * increase the space required enough to require more log and op
3496 * headers, so take that into account too.
3497 *
3498 * IMPORTANT: This reservation makes the assumption that if this
3499 * transaction is the first in an iclog and hence has the LR headers
3500 * accounted to it, then the remaining space in the iclog is
3501 * exclusively for this transaction. i.e. if the transaction is larger
3502 * than the iclog, it will be the only thing in that iclog.
3503 * Fundamentally, this means we must pass the entire log vector to
3504 * xlog_write to guarantee this.
3505 */
3506 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3507 num_headers = howmany(unit_bytes, iclog_space);
3508
3509 /* for split-recs - ophdrs added when data split over LRs */
3510 unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3511
3512 /* add extra header reservations if we overrun */
3513 while (!num_headers ||
3514 howmany(unit_bytes, iclog_space) > num_headers) {
3515 unit_bytes += sizeof(xlog_op_header_t);
3516 num_headers++;
3517 }
3518 unit_bytes += log->l_iclog_hsize * num_headers;
3519
3520 /* for commit-rec LR header - note: padding will subsume the ophdr */
3521 unit_bytes += log->l_iclog_hsize;
3522
3523 /* roundoff padding for transaction data and one for commit record */
3524 unit_bytes += 2 * log->l_iclog_roundoff;
3525
3526 if (niclogs)
3527 *niclogs = num_headers;
3528 return unit_bytes;
3529}
3530
3531int
3532xfs_log_calc_unit_res(
3533 struct xfs_mount *mp,
3534 int unit_bytes)
3535{
3536 return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
3537}
3538
3539/*
3540 * Allocate and initialise a new log ticket.
3541 */
3542struct xlog_ticket *
3543xlog_ticket_alloc(
3544 struct xlog *log,
3545 int unit_bytes,
3546 int cnt,
3547 bool permanent)
3548{
3549 struct xlog_ticket *tic;
3550 int unit_res;
3551
3552 tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3553
3554 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3555
3556 atomic_set(&tic->t_ref, 1);
3557 tic->t_task = current;
3558 INIT_LIST_HEAD(&tic->t_queue);
3559 tic->t_unit_res = unit_res;
3560 tic->t_curr_res = unit_res;
3561 tic->t_cnt = cnt;
3562 tic->t_ocnt = cnt;
3563 tic->t_tid = get_random_u32();
3564 if (permanent)
3565 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3566
3567 return tic;
3568}
3569
3570#if defined(DEBUG)
3571/*
3572 * Check to make sure the grant write head didn't just over lap the tail. If
3573 * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3574 * the cycles differ by exactly one and check the byte count.
3575 *
3576 * This check is run unlocked, so can give false positives. Rather than assert
3577 * on failures, use a warn-once flag and a panic tag to allow the admin to
3578 * determine if they want to panic the machine when such an error occurs. For
3579 * debug kernels this will have the same effect as using an assert but, unlinke
3580 * an assert, it can be turned off at runtime.
3581 */
3582STATIC void
3583xlog_verify_grant_tail(
3584 struct xlog *log)
3585{
3586 int tail_cycle, tail_blocks;
3587 int cycle, space;
3588
3589 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3590 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3591 if (tail_cycle != cycle) {
3592 if (cycle - 1 != tail_cycle &&
3593 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3594 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3595 "%s: cycle - 1 != tail_cycle", __func__);
3596 }
3597
3598 if (space > BBTOB(tail_blocks) &&
3599 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3600 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3601 "%s: space > BBTOB(tail_blocks)", __func__);
3602 }
3603 }
3604}
3605
3606/* check if it will fit */
3607STATIC void
3608xlog_verify_tail_lsn(
3609 struct xlog *log,
3610 struct xlog_in_core *iclog)
3611{
3612 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3613 int blocks;
3614
3615 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3616 blocks =
3617 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3618 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3619 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3620 } else {
3621 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3622
3623 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3624 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3625
3626 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3627 if (blocks < BTOBB(iclog->ic_offset) + 1)
3628 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3629 }
3630}
3631
3632/*
3633 * Perform a number of checks on the iclog before writing to disk.
3634 *
3635 * 1. Make sure the iclogs are still circular
3636 * 2. Make sure we have a good magic number
3637 * 3. Make sure we don't have magic numbers in the data
3638 * 4. Check fields of each log operation header for:
3639 * A. Valid client identifier
3640 * B. tid ptr value falls in valid ptr space (user space code)
3641 * C. Length in log record header is correct according to the
3642 * individual operation headers within record.
3643 * 5. When a bwrite will occur within 5 blocks of the front of the physical
3644 * log, check the preceding blocks of the physical log to make sure all
3645 * the cycle numbers agree with the current cycle number.
3646 */
3647STATIC void
3648xlog_verify_iclog(
3649 struct xlog *log,
3650 struct xlog_in_core *iclog,
3651 int count)
3652{
3653 xlog_op_header_t *ophead;
3654 xlog_in_core_t *icptr;
3655 xlog_in_core_2_t *xhdr;
3656 void *base_ptr, *ptr, *p;
3657 ptrdiff_t field_offset;
3658 uint8_t clientid;
3659 int len, i, j, k, op_len;
3660 int idx;
3661
3662 /* check validity of iclog pointers */
3663 spin_lock(&log->l_icloglock);
3664 icptr = log->l_iclog;
3665 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3666 ASSERT(icptr);
3667
3668 if (icptr != log->l_iclog)
3669 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3670 spin_unlock(&log->l_icloglock);
3671
3672 /* check log magic numbers */
3673 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3674 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3675
3676 base_ptr = ptr = &iclog->ic_header;
3677 p = &iclog->ic_header;
3678 for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3679 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3680 xfs_emerg(log->l_mp, "%s: unexpected magic num",
3681 __func__);
3682 }
3683
3684 /* check fields */
3685 len = be32_to_cpu(iclog->ic_header.h_num_logops);
3686 base_ptr = ptr = iclog->ic_datap;
3687 ophead = ptr;
3688 xhdr = iclog->ic_data;
3689 for (i = 0; i < len; i++) {
3690 ophead = ptr;
3691
3692 /* clientid is only 1 byte */
3693 p = &ophead->oh_clientid;
3694 field_offset = p - base_ptr;
3695 if (field_offset & 0x1ff) {
3696 clientid = ophead->oh_clientid;
3697 } else {
3698 idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3699 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3700 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3701 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3702 clientid = xlog_get_client_id(
3703 xhdr[j].hic_xheader.xh_cycle_data[k]);
3704 } else {
3705 clientid = xlog_get_client_id(
3706 iclog->ic_header.h_cycle_data[idx]);
3707 }
3708 }
3709 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) {
3710 xfs_warn(log->l_mp,
3711 "%s: op %d invalid clientid %d op "PTR_FMT" offset 0x%lx",
3712 __func__, i, clientid, ophead,
3713 (unsigned long)field_offset);
3714 }
3715
3716 /* check length */
3717 p = &ophead->oh_len;
3718 field_offset = p - base_ptr;
3719 if (field_offset & 0x1ff) {
3720 op_len = be32_to_cpu(ophead->oh_len);
3721 } else {
3722 idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3723 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3724 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3725 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3726 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3727 } else {
3728 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3729 }
3730 }
3731 ptr += sizeof(xlog_op_header_t) + op_len;
3732 }
3733}
3734#endif
3735
3736/*
3737 * Perform a forced shutdown on the log.
3738 *
3739 * This can be called from low level log code to trigger a shutdown, or from the
3740 * high level mount shutdown code when the mount shuts down.
3741 *
3742 * Our main objectives here are to make sure that:
3743 * a. if the shutdown was not due to a log IO error, flush the logs to
3744 * disk. Anything modified after this is ignored.
3745 * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3746 * parties to find out. Nothing new gets queued after this is done.
3747 * c. Tasks sleeping on log reservations, pinned objects and
3748 * other resources get woken up.
3749 * d. The mount is also marked as shut down so that log triggered shutdowns
3750 * still behave the same as if they called xfs_forced_shutdown().
3751 *
3752 * Return true if the shutdown cause was a log IO error and we actually shut the
3753 * log down.
3754 */
3755bool
3756xlog_force_shutdown(
3757 struct xlog *log,
3758 uint32_t shutdown_flags)
3759{
3760 bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);
3761
3762 if (!log)
3763 return false;
3764
3765 /*
3766 * Flush all the completed transactions to disk before marking the log
3767 * being shut down. We need to do this first as shutting down the log
3768 * before the force will prevent the log force from flushing the iclogs
3769 * to disk.
3770 *
3771 * When we are in recovery, there are no transactions to flush, and
3772 * we don't want to touch the log because we don't want to perturb the
3773 * current head/tail for future recovery attempts. Hence we need to
3774 * avoid a log force in this case.
3775 *
3776 * If we are shutting down due to a log IO error, then we must avoid
3777 * trying to write the log as that may just result in more IO errors and
3778 * an endless shutdown/force loop.
3779 */
3780 if (!log_error && !xlog_in_recovery(log))
3781 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3782
3783 /*
3784 * Atomically set the shutdown state. If the shutdown state is already
3785 * set, there someone else is performing the shutdown and so we are done
3786 * here. This should never happen because we should only ever get called
3787 * once by the first shutdown caller.
3788 *
3789 * Much of the log state machine transitions assume that shutdown state
3790 * cannot change once they hold the log->l_icloglock. Hence we need to
3791 * hold that lock here, even though we use the atomic test_and_set_bit()
3792 * operation to set the shutdown state.
3793 */
3794 spin_lock(&log->l_icloglock);
3795 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3796 spin_unlock(&log->l_icloglock);
3797 return false;
3798 }
3799 spin_unlock(&log->l_icloglock);
3800
3801 /*
3802 * If this log shutdown also sets the mount shutdown state, issue a
3803 * shutdown warning message.
3804 */
3805 if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3806 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3807"Filesystem has been shut down due to log error (0x%x).",
3808 shutdown_flags);
3809 xfs_alert(log->l_mp,
3810"Please unmount the filesystem and rectify the problem(s).");
3811 if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
3812 xfs_stack_trace();
3813 }
3814
3815 /*
3816 * We don't want anybody waiting for log reservations after this. That
3817 * means we have to wake up everybody queued up on reserveq as well as
3818 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
3819 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3820 * action is protected by the grant locks.
3821 */
3822 xlog_grant_head_wake_all(&log->l_reserve_head);
3823 xlog_grant_head_wake_all(&log->l_write_head);
3824
3825 /*
3826 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3827 * as if the log writes were completed. The abort handling in the log
3828 * item committed callback functions will do this again under lock to
3829 * avoid races.
3830 */
3831 spin_lock(&log->l_cilp->xc_push_lock);
3832 wake_up_all(&log->l_cilp->xc_start_wait);
3833 wake_up_all(&log->l_cilp->xc_commit_wait);
3834 spin_unlock(&log->l_cilp->xc_push_lock);
3835
3836 spin_lock(&log->l_icloglock);
3837 xlog_state_shutdown_callbacks(log);
3838 spin_unlock(&log->l_icloglock);
3839
3840 wake_up_var(&log->l_opstate);
3841 return log_error;
3842}
3843
3844STATIC int
3845xlog_iclogs_empty(
3846 struct xlog *log)
3847{
3848 xlog_in_core_t *iclog;
3849
3850 iclog = log->l_iclog;
3851 do {
3852 /* endianness does not matter here, zero is zero in
3853 * any language.
3854 */
3855 if (iclog->ic_header.h_num_logops)
3856 return 0;
3857 iclog = iclog->ic_next;
3858 } while (iclog != log->l_iclog);
3859 return 1;
3860}
3861
3862/*
3863 * Verify that an LSN stamped into a piece of metadata is valid. This is
3864 * intended for use in read verifiers on v5 superblocks.
3865 */
3866bool
3867xfs_log_check_lsn(
3868 struct xfs_mount *mp,
3869 xfs_lsn_t lsn)
3870{
3871 struct xlog *log = mp->m_log;
3872 bool valid;
3873
3874 /*
3875 * norecovery mode skips mount-time log processing and unconditionally
3876 * resets the in-core LSN. We can't validate in this mode, but
3877 * modifications are not allowed anyways so just return true.
3878 */
3879 if (xfs_has_norecovery(mp))
3880 return true;
3881
3882 /*
3883 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3884 * handled by recovery and thus safe to ignore here.
3885 */
3886 if (lsn == NULLCOMMITLSN)
3887 return true;
3888
3889 valid = xlog_valid_lsn(mp->m_log, lsn);
3890
3891 /* warn the user about what's gone wrong before verifier failure */
3892 if (!valid) {
3893 spin_lock(&log->l_icloglock);
3894 xfs_warn(mp,
3895"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3896"Please unmount and run xfs_repair (>= v4.3) to resolve.",
3897 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3898 log->l_curr_cycle, log->l_curr_block);
3899 spin_unlock(&log->l_icloglock);
3900 }
3901
3902 return valid;
3903}
3904
3905/*
3906 * Notify the log that we're about to start using a feature that is protected
3907 * by a log incompat feature flag. This will prevent log covering from
3908 * clearing those flags.
3909 */
3910void
3911xlog_use_incompat_feat(
3912 struct xlog *log)
3913{
3914 down_read(&log->l_incompat_users);
3915}
3916
3917/* Notify the log that we've finished using log incompat features. */
3918void
3919xlog_drop_incompat_feat(
3920 struct xlog *log)
3921{
3922 up_read(&log->l_incompat_users);
3923}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_errortag.h"
14#include "xfs_error.h"
15#include "xfs_trans.h"
16#include "xfs_trans_priv.h"
17#include "xfs_log.h"
18#include "xfs_log_priv.h"
19#include "xfs_trace.h"
20#include "xfs_sysfs.h"
21#include "xfs_sb.h"
22#include "xfs_health.h"
23
24kmem_zone_t *xfs_log_ticket_zone;
25
26/* Local miscellaneous function prototypes */
27STATIC struct xlog *
28xlog_alloc_log(
29 struct xfs_mount *mp,
30 struct xfs_buftarg *log_target,
31 xfs_daddr_t blk_offset,
32 int num_bblks);
33STATIC int
34xlog_space_left(
35 struct xlog *log,
36 atomic64_t *head);
37STATIC void
38xlog_dealloc_log(
39 struct xlog *log);
40
41/* local state machine functions */
42STATIC void xlog_state_done_syncing(
43 struct xlog_in_core *iclog);
44STATIC int
45xlog_state_get_iclog_space(
46 struct xlog *log,
47 int len,
48 struct xlog_in_core **iclog,
49 struct xlog_ticket *ticket,
50 int *continued_write,
51 int *logoffsetp);
52STATIC void
53xlog_state_switch_iclogs(
54 struct xlog *log,
55 struct xlog_in_core *iclog,
56 int eventual_size);
57STATIC void
58xlog_grant_push_ail(
59 struct xlog *log,
60 int need_bytes);
61STATIC void
62xlog_sync(
63 struct xlog *log,
64 struct xlog_in_core *iclog);
65#if defined(DEBUG)
66STATIC void
67xlog_verify_dest_ptr(
68 struct xlog *log,
69 void *ptr);
70STATIC void
71xlog_verify_grant_tail(
72 struct xlog *log);
73STATIC void
74xlog_verify_iclog(
75 struct xlog *log,
76 struct xlog_in_core *iclog,
77 int count);
78STATIC void
79xlog_verify_tail_lsn(
80 struct xlog *log,
81 struct xlog_in_core *iclog,
82 xfs_lsn_t tail_lsn);
83#else
84#define xlog_verify_dest_ptr(a,b)
85#define xlog_verify_grant_tail(a)
86#define xlog_verify_iclog(a,b,c)
87#define xlog_verify_tail_lsn(a,b,c)
88#endif
89
90STATIC int
91xlog_iclogs_empty(
92 struct xlog *log);
93
94static void
95xlog_grant_sub_space(
96 struct xlog *log,
97 atomic64_t *head,
98 int bytes)
99{
100 int64_t head_val = atomic64_read(head);
101 int64_t new, old;
102
103 do {
104 int cycle, space;
105
106 xlog_crack_grant_head_val(head_val, &cycle, &space);
107
108 space -= bytes;
109 if (space < 0) {
110 space += log->l_logsize;
111 cycle--;
112 }
113
114 old = head_val;
115 new = xlog_assign_grant_head_val(cycle, space);
116 head_val = atomic64_cmpxchg(head, old, new);
117 } while (head_val != old);
118}
119
120static void
121xlog_grant_add_space(
122 struct xlog *log,
123 atomic64_t *head,
124 int bytes)
125{
126 int64_t head_val = atomic64_read(head);
127 int64_t new, old;
128
129 do {
130 int tmp;
131 int cycle, space;
132
133 xlog_crack_grant_head_val(head_val, &cycle, &space);
134
135 tmp = log->l_logsize - space;
136 if (tmp > bytes)
137 space += bytes;
138 else {
139 space = bytes - tmp;
140 cycle++;
141 }
142
143 old = head_val;
144 new = xlog_assign_grant_head_val(cycle, space);
145 head_val = atomic64_cmpxchg(head, old, new);
146 } while (head_val != old);
147}
148
149STATIC void
150xlog_grant_head_init(
151 struct xlog_grant_head *head)
152{
153 xlog_assign_grant_head(&head->grant, 1, 0);
154 INIT_LIST_HEAD(&head->waiters);
155 spin_lock_init(&head->lock);
156}
157
158STATIC void
159xlog_grant_head_wake_all(
160 struct xlog_grant_head *head)
161{
162 struct xlog_ticket *tic;
163
164 spin_lock(&head->lock);
165 list_for_each_entry(tic, &head->waiters, t_queue)
166 wake_up_process(tic->t_task);
167 spin_unlock(&head->lock);
168}
169
170static inline int
171xlog_ticket_reservation(
172 struct xlog *log,
173 struct xlog_grant_head *head,
174 struct xlog_ticket *tic)
175{
176 if (head == &log->l_write_head) {
177 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
178 return tic->t_unit_res;
179 } else {
180 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
181 return tic->t_unit_res * tic->t_cnt;
182 else
183 return tic->t_unit_res;
184 }
185}
186
187STATIC bool
188xlog_grant_head_wake(
189 struct xlog *log,
190 struct xlog_grant_head *head,
191 int *free_bytes)
192{
193 struct xlog_ticket *tic;
194 int need_bytes;
195 bool woken_task = false;
196
197 list_for_each_entry(tic, &head->waiters, t_queue) {
198
199 /*
200 * There is a chance that the size of the CIL checkpoints in
201 * progress at the last AIL push target calculation resulted in
202 * limiting the target to the log head (l_last_sync_lsn) at the
203 * time. This may not reflect where the log head is now as the
204 * CIL checkpoints may have completed.
205 *
206 * Hence when we are woken here, it may be that the head of the
207 * log that has moved rather than the tail. As the tail didn't
208 * move, there still won't be space available for the
209 * reservation we require. However, if the AIL has already
210 * pushed to the target defined by the old log head location, we
211 * will hang here waiting for something else to update the AIL
212 * push target.
213 *
214 * Therefore, if there isn't space to wake the first waiter on
215 * the grant head, we need to push the AIL again to ensure the
216 * target reflects both the current log tail and log head
217 * position before we wait for the tail to move again.
218 */
219
220 need_bytes = xlog_ticket_reservation(log, head, tic);
221 if (*free_bytes < need_bytes) {
222 if (!woken_task)
223 xlog_grant_push_ail(log, need_bytes);
224 return false;
225 }
226
227 *free_bytes -= need_bytes;
228 trace_xfs_log_grant_wake_up(log, tic);
229 wake_up_process(tic->t_task);
230 woken_task = true;
231 }
232
233 return true;
234}
235
236STATIC int
237xlog_grant_head_wait(
238 struct xlog *log,
239 struct xlog_grant_head *head,
240 struct xlog_ticket *tic,
241 int need_bytes) __releases(&head->lock)
242 __acquires(&head->lock)
243{
244 list_add_tail(&tic->t_queue, &head->waiters);
245
246 do {
247 if (XLOG_FORCED_SHUTDOWN(log))
248 goto shutdown;
249 xlog_grant_push_ail(log, need_bytes);
250
251 __set_current_state(TASK_UNINTERRUPTIBLE);
252 spin_unlock(&head->lock);
253
254 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
255
256 trace_xfs_log_grant_sleep(log, tic);
257 schedule();
258 trace_xfs_log_grant_wake(log, tic);
259
260 spin_lock(&head->lock);
261 if (XLOG_FORCED_SHUTDOWN(log))
262 goto shutdown;
263 } while (xlog_space_left(log, &head->grant) < need_bytes);
264
265 list_del_init(&tic->t_queue);
266 return 0;
267shutdown:
268 list_del_init(&tic->t_queue);
269 return -EIO;
270}
271
272/*
273 * Atomically get the log space required for a log ticket.
274 *
275 * Once a ticket gets put onto head->waiters, it will only return after the
276 * needed reservation is satisfied.
277 *
278 * This function is structured so that it has a lock free fast path. This is
279 * necessary because every new transaction reservation will come through this
280 * path. Hence any lock will be globally hot if we take it unconditionally on
281 * every pass.
282 *
283 * As tickets are only ever moved on and off head->waiters under head->lock, we
284 * only need to take that lock if we are going to add the ticket to the queue
285 * and sleep. We can avoid taking the lock if the ticket was never added to
286 * head->waiters because the t_queue list head will be empty and we hold the
287 * only reference to it so it can safely be checked unlocked.
288 */
289STATIC int
290xlog_grant_head_check(
291 struct xlog *log,
292 struct xlog_grant_head *head,
293 struct xlog_ticket *tic,
294 int *need_bytes)
295{
296 int free_bytes;
297 int error = 0;
298
299 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
300
301 /*
302 * If there are other waiters on the queue then give them a chance at
303 * logspace before us. Wake up the first waiters, if we do not wake
304 * up all the waiters then go to sleep waiting for more free space,
305 * otherwise try to get some space for this transaction.
306 */
307 *need_bytes = xlog_ticket_reservation(log, head, tic);
308 free_bytes = xlog_space_left(log, &head->grant);
309 if (!list_empty_careful(&head->waiters)) {
310 spin_lock(&head->lock);
311 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
312 free_bytes < *need_bytes) {
313 error = xlog_grant_head_wait(log, head, tic,
314 *need_bytes);
315 }
316 spin_unlock(&head->lock);
317 } else if (free_bytes < *need_bytes) {
318 spin_lock(&head->lock);
319 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
320 spin_unlock(&head->lock);
321 }
322
323 return error;
324}
325
326static void
327xlog_tic_reset_res(xlog_ticket_t *tic)
328{
329 tic->t_res_num = 0;
330 tic->t_res_arr_sum = 0;
331 tic->t_res_num_ophdrs = 0;
332}
333
334static void
335xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
336{
337 if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
338 /* add to overflow and start again */
339 tic->t_res_o_flow += tic->t_res_arr_sum;
340 tic->t_res_num = 0;
341 tic->t_res_arr_sum = 0;
342 }
343
344 tic->t_res_arr[tic->t_res_num].r_len = len;
345 tic->t_res_arr[tic->t_res_num].r_type = type;
346 tic->t_res_arr_sum += len;
347 tic->t_res_num++;
348}
349
350/*
351 * Replenish the byte reservation required by moving the grant write head.
352 */
353int
354xfs_log_regrant(
355 struct xfs_mount *mp,
356 struct xlog_ticket *tic)
357{
358 struct xlog *log = mp->m_log;
359 int need_bytes;
360 int error = 0;
361
362 if (XLOG_FORCED_SHUTDOWN(log))
363 return -EIO;
364
365 XFS_STATS_INC(mp, xs_try_logspace);
366
367 /*
368 * This is a new transaction on the ticket, so we need to change the
369 * transaction ID so that the next transaction has a different TID in
370 * the log. Just add one to the existing tid so that we can see chains
371 * of rolling transactions in the log easily.
372 */
373 tic->t_tid++;
374
375 xlog_grant_push_ail(log, tic->t_unit_res);
376
377 tic->t_curr_res = tic->t_unit_res;
378 xlog_tic_reset_res(tic);
379
380 if (tic->t_cnt > 0)
381 return 0;
382
383 trace_xfs_log_regrant(log, tic);
384
385 error = xlog_grant_head_check(log, &log->l_write_head, tic,
386 &need_bytes);
387 if (error)
388 goto out_error;
389
390 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
391 trace_xfs_log_regrant_exit(log, tic);
392 xlog_verify_grant_tail(log);
393 return 0;
394
395out_error:
396 /*
397 * If we are failing, make sure the ticket doesn't have any current
398 * reservations. We don't want to add this back when the ticket/
399 * transaction gets cancelled.
400 */
401 tic->t_curr_res = 0;
402 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
403 return error;
404}
405
406/*
407 * Reserve log space and return a ticket corresponding to the reservation.
408 *
409 * Each reservation is going to reserve extra space for a log record header.
410 * When writes happen to the on-disk log, we don't subtract the length of the
411 * log record header from any reservation. By wasting space in each
412 * reservation, we prevent over allocation problems.
413 */
414int
415xfs_log_reserve(
416 struct xfs_mount *mp,
417 int unit_bytes,
418 int cnt,
419 struct xlog_ticket **ticp,
420 uint8_t client,
421 bool permanent)
422{
423 struct xlog *log = mp->m_log;
424 struct xlog_ticket *tic;
425 int need_bytes;
426 int error = 0;
427
428 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
429
430 if (XLOG_FORCED_SHUTDOWN(log))
431 return -EIO;
432
433 XFS_STATS_INC(mp, xs_try_logspace);
434
435 ASSERT(*ticp == NULL);
436 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent);
437 *ticp = tic;
438
439 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
440 : tic->t_unit_res);
441
442 trace_xfs_log_reserve(log, tic);
443
444 error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
445 &need_bytes);
446 if (error)
447 goto out_error;
448
449 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
450 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
451 trace_xfs_log_reserve_exit(log, tic);
452 xlog_verify_grant_tail(log);
453 return 0;
454
455out_error:
456 /*
457 * If we are failing, make sure the ticket doesn't have any current
458 * reservations. We don't want to add this back when the ticket/
459 * transaction gets cancelled.
460 */
461 tic->t_curr_res = 0;
462 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
463 return error;
464}
465
466static bool
467__xlog_state_release_iclog(
468 struct xlog *log,
469 struct xlog_in_core *iclog)
470{
471 lockdep_assert_held(&log->l_icloglock);
472
473 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
474 /* update tail before writing to iclog */
475 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
476
477 iclog->ic_state = XLOG_STATE_SYNCING;
478 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
479 xlog_verify_tail_lsn(log, iclog, tail_lsn);
480 /* cycle incremented when incrementing curr_block */
481 return true;
482 }
483
484 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
485 return false;
486}
487
488/*
489 * Flush iclog to disk if this is the last reference to the given iclog and the
490 * it is in the WANT_SYNC state.
491 */
492static int
493xlog_state_release_iclog(
494 struct xlog *log,
495 struct xlog_in_core *iclog)
496{
497 lockdep_assert_held(&log->l_icloglock);
498
499 if (iclog->ic_state == XLOG_STATE_IOERROR)
500 return -EIO;
501
502 if (atomic_dec_and_test(&iclog->ic_refcnt) &&
503 __xlog_state_release_iclog(log, iclog)) {
504 spin_unlock(&log->l_icloglock);
505 xlog_sync(log, iclog);
506 spin_lock(&log->l_icloglock);
507 }
508
509 return 0;
510}
511
512void
513xfs_log_release_iclog(
514 struct xlog_in_core *iclog)
515{
516 struct xlog *log = iclog->ic_log;
517 bool sync = false;
518
519 if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
520 if (iclog->ic_state != XLOG_STATE_IOERROR)
521 sync = __xlog_state_release_iclog(log, iclog);
522 spin_unlock(&log->l_icloglock);
523 }
524
525 if (sync)
526 xlog_sync(log, iclog);
527}
528
529/*
530 * Mount a log filesystem
531 *
532 * mp - ubiquitous xfs mount point structure
533 * log_target - buftarg of on-disk log device
534 * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
535 * num_bblocks - Number of BBSIZE blocks in on-disk log
536 *
537 * Return error or zero.
538 */
539int
540xfs_log_mount(
541 xfs_mount_t *mp,
542 xfs_buftarg_t *log_target,
543 xfs_daddr_t blk_offset,
544 int num_bblks)
545{
546 bool fatal = xfs_sb_version_hascrc(&mp->m_sb);
547 int error = 0;
548 int min_logfsbs;
549
550 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
551 xfs_notice(mp, "Mounting V%d Filesystem",
552 XFS_SB_VERSION_NUM(&mp->m_sb));
553 } else {
554 xfs_notice(mp,
555"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
556 XFS_SB_VERSION_NUM(&mp->m_sb));
557 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
558 }
559
560 mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
561 if (IS_ERR(mp->m_log)) {
562 error = PTR_ERR(mp->m_log);
563 goto out;
564 }
565
566 /*
567 * Validate the given log space and drop a critical message via syslog
568 * if the log size is too small that would lead to some unexpected
569 * situations in transaction log space reservation stage.
570 *
571 * Note: we can't just reject the mount if the validation fails. This
572 * would mean that people would have to downgrade their kernel just to
573 * remedy the situation as there is no way to grow the log (short of
574 * black magic surgery with xfs_db).
575 *
576 * We can, however, reject mounts for CRC format filesystems, as the
577 * mkfs binary being used to make the filesystem should never create a
578 * filesystem with a log that is too small.
579 */
580 min_logfsbs = xfs_log_calc_minimum_size(mp);
581
582 if (mp->m_sb.sb_logblocks < min_logfsbs) {
583 xfs_warn(mp,
584 "Log size %d blocks too small, minimum size is %d blocks",
585 mp->m_sb.sb_logblocks, min_logfsbs);
586 error = -EINVAL;
587 } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
588 xfs_warn(mp,
589 "Log size %d blocks too large, maximum size is %lld blocks",
590 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
591 error = -EINVAL;
592 } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
593 xfs_warn(mp,
594 "log size %lld bytes too large, maximum size is %lld bytes",
595 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
596 XFS_MAX_LOG_BYTES);
597 error = -EINVAL;
598 } else if (mp->m_sb.sb_logsunit > 1 &&
599 mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
600 xfs_warn(mp,
601 "log stripe unit %u bytes must be a multiple of block size",
602 mp->m_sb.sb_logsunit);
603 error = -EINVAL;
604 fatal = true;
605 }
606 if (error) {
607 /*
608 * Log check errors are always fatal on v5; or whenever bad
609 * metadata leads to a crash.
610 */
611 if (fatal) {
612 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
613 ASSERT(0);
614 goto out_free_log;
615 }
616 xfs_crit(mp, "Log size out of supported range.");
617 xfs_crit(mp,
618"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
619 }
620
621 /*
622 * Initialize the AIL now we have a log.
623 */
624 error = xfs_trans_ail_init(mp);
625 if (error) {
626 xfs_warn(mp, "AIL initialisation failed: error %d", error);
627 goto out_free_log;
628 }
629 mp->m_log->l_ailp = mp->m_ail;
630
631 /*
632 * skip log recovery on a norecovery mount. pretend it all
633 * just worked.
634 */
635 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
636 int readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
637
638 if (readonly)
639 mp->m_flags &= ~XFS_MOUNT_RDONLY;
640
641 error = xlog_recover(mp->m_log);
642
643 if (readonly)
644 mp->m_flags |= XFS_MOUNT_RDONLY;
645 if (error) {
646 xfs_warn(mp, "log mount/recovery failed: error %d",
647 error);
648 xlog_recover_cancel(mp->m_log);
649 goto out_destroy_ail;
650 }
651 }
652
653 error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
654 "log");
655 if (error)
656 goto out_destroy_ail;
657
658 /* Normal transactions can now occur */
659 mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
660
661 /*
662 * Now the log has been fully initialised and we know were our
663 * space grant counters are, we can initialise the permanent ticket
664 * needed for delayed logging to work.
665 */
666 xlog_cil_init_post_recovery(mp->m_log);
667
668 return 0;
669
670out_destroy_ail:
671 xfs_trans_ail_destroy(mp);
672out_free_log:
673 xlog_dealloc_log(mp->m_log);
674out:
675 return error;
676}
677
678/*
679 * Finish the recovery of the file system. This is separate from the
680 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
681 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
682 * here.
683 *
684 * If we finish recovery successfully, start the background log work. If we are
685 * not doing recovery, then we have a RO filesystem and we don't need to start
686 * it.
687 */
688int
689xfs_log_mount_finish(
690 struct xfs_mount *mp)
691{
692 int error = 0;
693 bool readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
694 bool recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;
695
696 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
697 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
698 return 0;
699 } else if (readonly) {
700 /* Allow unlinked processing to proceed */
701 mp->m_flags &= ~XFS_MOUNT_RDONLY;
702 }
703
704 /*
705 * During the second phase of log recovery, we need iget and
706 * iput to behave like they do for an active filesystem.
707 * xfs_fs_drop_inode needs to be able to prevent the deletion
708 * of inodes before we're done replaying log items on those
709 * inodes. Turn it off immediately after recovery finishes
710 * so that we don't leak the quota inodes if subsequent mount
711 * activities fail.
712 *
713 * We let all inodes involved in redo item processing end up on
714 * the LRU instead of being evicted immediately so that if we do
715 * something to an unlinked inode, the irele won't cause
716 * premature truncation and freeing of the inode, which results
717 * in log recovery failure. We have to evict the unreferenced
718 * lru inodes after clearing SB_ACTIVE because we don't
719 * otherwise clean up the lru if there's a subsequent failure in
720 * xfs_mountfs, which leads to us leaking the inodes if nothing
721 * else (e.g. quotacheck) references the inodes before the
722 * mount failure occurs.
723 */
724 mp->m_super->s_flags |= SB_ACTIVE;
725 error = xlog_recover_finish(mp->m_log);
726 if (!error)
727 xfs_log_work_queue(mp);
728 mp->m_super->s_flags &= ~SB_ACTIVE;
729 evict_inodes(mp->m_super);
730
731 /*
732 * Drain the buffer LRU after log recovery. This is required for v4
733 * filesystems to avoid leaving around buffers with NULL verifier ops,
734 * but we do it unconditionally to make sure we're always in a clean
735 * cache state after mount.
736 *
737 * Don't push in the error case because the AIL may have pending intents
738 * that aren't removed until recovery is cancelled.
739 */
740 if (!error && recovered) {
741 xfs_log_force(mp, XFS_LOG_SYNC);
742 xfs_ail_push_all_sync(mp->m_ail);
743 }
744 xfs_wait_buftarg(mp->m_ddev_targp);
745
746 if (readonly)
747 mp->m_flags |= XFS_MOUNT_RDONLY;
748
749 return error;
750}
751
752/*
753 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
754 * the log.
755 */
756void
757xfs_log_mount_cancel(
758 struct xfs_mount *mp)
759{
760 xlog_recover_cancel(mp->m_log);
761 xfs_log_unmount(mp);
762}
763
764/*
765 * Wait for the iclog to be written disk, or return an error if the log has been
766 * shut down.
767 */
768static int
769xlog_wait_on_iclog(
770 struct xlog_in_core *iclog)
771 __releases(iclog->ic_log->l_icloglock)
772{
773 struct xlog *log = iclog->ic_log;
774
775 if (!XLOG_FORCED_SHUTDOWN(log) &&
776 iclog->ic_state != XLOG_STATE_ACTIVE &&
777 iclog->ic_state != XLOG_STATE_DIRTY) {
778 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
779 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
780 } else {
781 spin_unlock(&log->l_icloglock);
782 }
783
784 if (XLOG_FORCED_SHUTDOWN(log))
785 return -EIO;
786 return 0;
787}
788
789/*
790 * Write out an unmount record using the ticket provided. We have to account for
791 * the data space used in the unmount ticket as this write is not done from a
792 * transaction context that has already done the accounting for us.
793 */
794static int
795xlog_write_unmount_record(
796 struct xlog *log,
797 struct xlog_ticket *ticket,
798 xfs_lsn_t *lsn,
799 uint flags)
800{
801 struct xfs_unmount_log_format ulf = {
802 .magic = XLOG_UNMOUNT_TYPE,
803 };
804 struct xfs_log_iovec reg = {
805 .i_addr = &ulf,
806 .i_len = sizeof(ulf),
807 .i_type = XLOG_REG_TYPE_UNMOUNT,
808 };
809 struct xfs_log_vec vec = {
810 .lv_niovecs = 1,
811 .lv_iovecp = ®,
812 };
813
814 /* account for space used by record data */
815 ticket->t_curr_res -= sizeof(ulf);
816 return xlog_write(log, &vec, ticket, lsn, NULL, flags, false);
817}
818
819/*
820 * Mark the filesystem clean by writing an unmount record to the head of the
821 * log.
822 */
823static void
824xlog_unmount_write(
825 struct xlog *log)
826{
827 struct xfs_mount *mp = log->l_mp;
828 struct xlog_in_core *iclog;
829 struct xlog_ticket *tic = NULL;
830 xfs_lsn_t lsn;
831 uint flags = XLOG_UNMOUNT_TRANS;
832 int error;
833
834 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
835 if (error)
836 goto out_err;
837
838 error = xlog_write_unmount_record(log, tic, &lsn, flags);
839 /*
840 * At this point, we're umounting anyway, so there's no point in
841 * transitioning log state to IOERROR. Just continue...
842 */
843out_err:
844 if (error)
845 xfs_alert(mp, "%s: unmount record failed", __func__);
846
847 spin_lock(&log->l_icloglock);
848 iclog = log->l_iclog;
849 atomic_inc(&iclog->ic_refcnt);
850 if (iclog->ic_state == XLOG_STATE_ACTIVE)
851 xlog_state_switch_iclogs(log, iclog, 0);
852 else
853 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
854 iclog->ic_state == XLOG_STATE_IOERROR);
855 error = xlog_state_release_iclog(log, iclog);
856 xlog_wait_on_iclog(iclog);
857
858 if (tic) {
859 trace_xfs_log_umount_write(log, tic);
860 xfs_log_ticket_ungrant(log, tic);
861 }
862}
863
864static void
865xfs_log_unmount_verify_iclog(
866 struct xlog *log)
867{
868 struct xlog_in_core *iclog = log->l_iclog;
869
870 do {
871 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
872 ASSERT(iclog->ic_offset == 0);
873 } while ((iclog = iclog->ic_next) != log->l_iclog);
874}
875
876/*
877 * Unmount record used to have a string "Unmount filesystem--" in the
878 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
879 * We just write the magic number now since that particular field isn't
880 * currently architecture converted and "Unmount" is a bit foo.
881 * As far as I know, there weren't any dependencies on the old behaviour.
882 */
883static void
884xfs_log_unmount_write(
885 struct xfs_mount *mp)
886{
887 struct xlog *log = mp->m_log;
888
889 /*
890 * Don't write out unmount record on norecovery mounts or ro devices.
891 * Or, if we are doing a forced umount (typically because of IO errors).
892 */
893 if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
894 xfs_readonly_buftarg(log->l_targ)) {
895 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
896 return;
897 }
898
899 xfs_log_force(mp, XFS_LOG_SYNC);
900
901 if (XLOG_FORCED_SHUTDOWN(log))
902 return;
903
904 /*
905 * If we think the summary counters are bad, avoid writing the unmount
906 * record to force log recovery at next mount, after which the summary
907 * counters will be recalculated. Refer to xlog_check_unmount_rec for
908 * more details.
909 */
910 if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
911 XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
912 xfs_alert(mp, "%s: will fix summary counters at next mount",
913 __func__);
914 return;
915 }
916
917 xfs_log_unmount_verify_iclog(log);
918 xlog_unmount_write(log);
919}
920
921/*
922 * Empty the log for unmount/freeze.
923 *
924 * To do this, we first need to shut down the background log work so it is not
925 * trying to cover the log as we clean up. We then need to unpin all objects in
926 * the log so we can then flush them out. Once they have completed their IO and
927 * run the callbacks removing themselves from the AIL, we can write the unmount
928 * record.
929 */
930void
931xfs_log_quiesce(
932 struct xfs_mount *mp)
933{
934 cancel_delayed_work_sync(&mp->m_log->l_work);
935 xfs_log_force(mp, XFS_LOG_SYNC);
936
937 /*
938 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
939 * will push it, xfs_wait_buftarg() will not wait for it. Further,
940 * xfs_buf_iowait() cannot be used because it was pushed with the
941 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
942 * the IO to complete.
943 */
944 xfs_ail_push_all_sync(mp->m_ail);
945 xfs_wait_buftarg(mp->m_ddev_targp);
946 xfs_buf_lock(mp->m_sb_bp);
947 xfs_buf_unlock(mp->m_sb_bp);
948
949 xfs_log_unmount_write(mp);
950}
951
952/*
953 * Shut down and release the AIL and Log.
954 *
955 * During unmount, we need to ensure we flush all the dirty metadata objects
956 * from the AIL so that the log is empty before we write the unmount record to
957 * the log. Once this is done, we can tear down the AIL and the log.
958 */
959void
960xfs_log_unmount(
961 struct xfs_mount *mp)
962{
963 xfs_log_quiesce(mp);
964
965 xfs_trans_ail_destroy(mp);
966
967 xfs_sysfs_del(&mp->m_log->l_kobj);
968
969 xlog_dealloc_log(mp->m_log);
970}
971
972void
973xfs_log_item_init(
974 struct xfs_mount *mp,
975 struct xfs_log_item *item,
976 int type,
977 const struct xfs_item_ops *ops)
978{
979 item->li_mountp = mp;
980 item->li_ailp = mp->m_ail;
981 item->li_type = type;
982 item->li_ops = ops;
983 item->li_lv = NULL;
984
985 INIT_LIST_HEAD(&item->li_ail);
986 INIT_LIST_HEAD(&item->li_cil);
987 INIT_LIST_HEAD(&item->li_bio_list);
988 INIT_LIST_HEAD(&item->li_trans);
989}
990
991/*
992 * Wake up processes waiting for log space after we have moved the log tail.
993 */
994void
995xfs_log_space_wake(
996 struct xfs_mount *mp)
997{
998 struct xlog *log = mp->m_log;
999 int free_bytes;
1000
1001 if (XLOG_FORCED_SHUTDOWN(log))
1002 return;
1003
1004 if (!list_empty_careful(&log->l_write_head.waiters)) {
1005 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
1006
1007 spin_lock(&log->l_write_head.lock);
1008 free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1009 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1010 spin_unlock(&log->l_write_head.lock);
1011 }
1012
1013 if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1014 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
1015
1016 spin_lock(&log->l_reserve_head.lock);
1017 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1018 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1019 spin_unlock(&log->l_reserve_head.lock);
1020 }
1021}
1022
1023/*
1024 * Determine if we have a transaction that has gone to disk that needs to be
1025 * covered. To begin the transition to the idle state firstly the log needs to
1026 * be idle. That means the CIL, the AIL and the iclogs needs to be empty before
1027 * we start attempting to cover the log.
1028 *
1029 * Only if we are then in a state where covering is needed, the caller is
1030 * informed that dummy transactions are required to move the log into the idle
1031 * state.
1032 *
1033 * If there are any items in the AIl or CIL, then we do not want to attempt to
1034 * cover the log as we may be in a situation where there isn't log space
1035 * available to run a dummy transaction and this can lead to deadlocks when the
1036 * tail of the log is pinned by an item that is modified in the CIL. Hence
1037 * there's no point in running a dummy transaction at this point because we
1038 * can't start trying to idle the log until both the CIL and AIL are empty.
1039 */
1040static int
1041xfs_log_need_covered(xfs_mount_t *mp)
1042{
1043 struct xlog *log = mp->m_log;
1044 int needed = 0;
1045
1046 if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
1047 return 0;
1048
1049 if (!xlog_cil_empty(log))
1050 return 0;
1051
1052 spin_lock(&log->l_icloglock);
1053 switch (log->l_covered_state) {
1054 case XLOG_STATE_COVER_DONE:
1055 case XLOG_STATE_COVER_DONE2:
1056 case XLOG_STATE_COVER_IDLE:
1057 break;
1058 case XLOG_STATE_COVER_NEED:
1059 case XLOG_STATE_COVER_NEED2:
1060 if (xfs_ail_min_lsn(log->l_ailp))
1061 break;
1062 if (!xlog_iclogs_empty(log))
1063 break;
1064
1065 needed = 1;
1066 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1067 log->l_covered_state = XLOG_STATE_COVER_DONE;
1068 else
1069 log->l_covered_state = XLOG_STATE_COVER_DONE2;
1070 break;
1071 default:
1072 needed = 1;
1073 break;
1074 }
1075 spin_unlock(&log->l_icloglock);
1076 return needed;
1077}
1078
1079/*
1080 * We may be holding the log iclog lock upon entering this routine.
1081 */
1082xfs_lsn_t
1083xlog_assign_tail_lsn_locked(
1084 struct xfs_mount *mp)
1085{
1086 struct xlog *log = mp->m_log;
1087 struct xfs_log_item *lip;
1088 xfs_lsn_t tail_lsn;
1089
1090 assert_spin_locked(&mp->m_ail->ail_lock);
1091
1092 /*
1093 * To make sure we always have a valid LSN for the log tail we keep
1094 * track of the last LSN which was committed in log->l_last_sync_lsn,
1095 * and use that when the AIL was empty.
1096 */
1097 lip = xfs_ail_min(mp->m_ail);
1098 if (lip)
1099 tail_lsn = lip->li_lsn;
1100 else
1101 tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1102 trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1103 atomic64_set(&log->l_tail_lsn, tail_lsn);
1104 return tail_lsn;
1105}
1106
1107xfs_lsn_t
1108xlog_assign_tail_lsn(
1109 struct xfs_mount *mp)
1110{
1111 xfs_lsn_t tail_lsn;
1112
1113 spin_lock(&mp->m_ail->ail_lock);
1114 tail_lsn = xlog_assign_tail_lsn_locked(mp);
1115 spin_unlock(&mp->m_ail->ail_lock);
1116
1117 return tail_lsn;
1118}
1119
1120/*
1121 * Return the space in the log between the tail and the head. The head
1122 * is passed in the cycle/bytes formal parms. In the special case where
1123 * the reserve head has wrapped passed the tail, this calculation is no
1124 * longer valid. In this case, just return 0 which means there is no space
1125 * in the log. This works for all places where this function is called
1126 * with the reserve head. Of course, if the write head were to ever
1127 * wrap the tail, we should blow up. Rather than catch this case here,
1128 * we depend on other ASSERTions in other parts of the code. XXXmiken
1129 *
1130 * This code also handles the case where the reservation head is behind
1131 * the tail. The details of this case are described below, but the end
1132 * result is that we return the size of the log as the amount of space left.
1133 */
1134STATIC int
1135xlog_space_left(
1136 struct xlog *log,
1137 atomic64_t *head)
1138{
1139 int free_bytes;
1140 int tail_bytes;
1141 int tail_cycle;
1142 int head_cycle;
1143 int head_bytes;
1144
1145 xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1146 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1147 tail_bytes = BBTOB(tail_bytes);
1148 if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1149 free_bytes = log->l_logsize - (head_bytes - tail_bytes);
1150 else if (tail_cycle + 1 < head_cycle)
1151 return 0;
1152 else if (tail_cycle < head_cycle) {
1153 ASSERT(tail_cycle == (head_cycle - 1));
1154 free_bytes = tail_bytes - head_bytes;
1155 } else {
1156 /*
1157 * The reservation head is behind the tail.
1158 * In this case we just want to return the size of the
1159 * log as the amount of space left.
1160 */
1161 xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1162 xfs_alert(log->l_mp,
1163 " tail_cycle = %d, tail_bytes = %d",
1164 tail_cycle, tail_bytes);
1165 xfs_alert(log->l_mp,
1166 " GH cycle = %d, GH bytes = %d",
1167 head_cycle, head_bytes);
1168 ASSERT(0);
1169 free_bytes = log->l_logsize;
1170 }
1171 return free_bytes;
1172}
1173
1174
1175static void
1176xlog_ioend_work(
1177 struct work_struct *work)
1178{
1179 struct xlog_in_core *iclog =
1180 container_of(work, struct xlog_in_core, ic_end_io_work);
1181 struct xlog *log = iclog->ic_log;
1182 int error;
1183
1184 error = blk_status_to_errno(iclog->ic_bio.bi_status);
1185#ifdef DEBUG
1186 /* treat writes with injected CRC errors as failed */
1187 if (iclog->ic_fail_crc)
1188 error = -EIO;
1189#endif
1190
1191 /*
1192 * Race to shutdown the filesystem if we see an error.
1193 */
1194 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1195 xfs_alert(log->l_mp, "log I/O error %d", error);
1196 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
1197 }
1198
1199 xlog_state_done_syncing(iclog);
1200 bio_uninit(&iclog->ic_bio);
1201
1202 /*
1203 * Drop the lock to signal that we are done. Nothing references the
1204 * iclog after this, so an unmount waiting on this lock can now tear it
1205 * down safely. As such, it is unsafe to reference the iclog after the
1206 * unlock as we could race with it being freed.
1207 */
1208 up(&iclog->ic_sema);
1209}
1210
1211/*
1212 * Return size of each in-core log record buffer.
1213 *
1214 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
1215 *
1216 * If the filesystem blocksize is too large, we may need to choose a
1217 * larger size since the directory code currently logs entire blocks.
1218 */
1219STATIC void
1220xlog_get_iclog_buffer_size(
1221 struct xfs_mount *mp,
1222 struct xlog *log)
1223{
1224 if (mp->m_logbufs <= 0)
1225 mp->m_logbufs = XLOG_MAX_ICLOGS;
1226 if (mp->m_logbsize <= 0)
1227 mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;
1228
1229 log->l_iclog_bufs = mp->m_logbufs;
1230 log->l_iclog_size = mp->m_logbsize;
1231
1232 /*
1233 * # headers = size / 32k - one header holds cycles from 32k of data.
1234 */
1235 log->l_iclog_heads =
1236 DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
1237 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1238}
1239
1240void
1241xfs_log_work_queue(
1242 struct xfs_mount *mp)
1243{
1244 queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
1245 msecs_to_jiffies(xfs_syncd_centisecs * 10));
1246}
1247
1248/*
1249 * Every sync period we need to unpin all items in the AIL and push them to
1250 * disk. If there is nothing dirty, then we might need to cover the log to
1251 * indicate that the filesystem is idle.
1252 */
1253static void
1254xfs_log_worker(
1255 struct work_struct *work)
1256{
1257 struct xlog *log = container_of(to_delayed_work(work),
1258 struct xlog, l_work);
1259 struct xfs_mount *mp = log->l_mp;
1260
1261 /* dgc: errors ignored - not fatal and nowhere to report them */
1262 if (xfs_log_need_covered(mp)) {
1263 /*
1264 * Dump a transaction into the log that contains no real change.
1265 * This is needed to stamp the current tail LSN into the log
1266 * during the covering operation.
1267 *
1268 * We cannot use an inode here for this - that will push dirty
1269 * state back up into the VFS and then periodic inode flushing
1270 * will prevent log covering from making progress. Hence we
1271 * synchronously log the superblock instead to ensure the
1272 * superblock is immediately unpinned and can be written back.
1273 */
1274 xfs_sync_sb(mp, true);
1275 } else
1276 xfs_log_force(mp, 0);
1277
1278 /* start pushing all the metadata that is currently dirty */
1279 xfs_ail_push_all(mp->m_ail);
1280
1281 /* queue us up again */
1282 xfs_log_work_queue(mp);
1283}
1284
1285/*
1286 * This routine initializes some of the log structure for a given mount point.
1287 * Its primary purpose is to fill in enough, so recovery can occur. However,
1288 * some other stuff may be filled in too.
1289 */
1290STATIC struct xlog *
1291xlog_alloc_log(
1292 struct xfs_mount *mp,
1293 struct xfs_buftarg *log_target,
1294 xfs_daddr_t blk_offset,
1295 int num_bblks)
1296{
1297 struct xlog *log;
1298 xlog_rec_header_t *head;
1299 xlog_in_core_t **iclogp;
1300 xlog_in_core_t *iclog, *prev_iclog=NULL;
1301 int i;
1302 int error = -ENOMEM;
1303 uint log2_size = 0;
1304
1305 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1306 if (!log) {
1307 xfs_warn(mp, "Log allocation failed: No memory!");
1308 goto out;
1309 }
1310
1311 log->l_mp = mp;
1312 log->l_targ = log_target;
1313 log->l_logsize = BBTOB(num_bblks);
1314 log->l_logBBstart = blk_offset;
1315 log->l_logBBsize = num_bblks;
1316 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1317 log->l_flags |= XLOG_ACTIVE_RECOVERY;
1318 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1319
1320 log->l_prev_block = -1;
1321 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1322 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1323 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1324 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1325
1326 xlog_grant_head_init(&log->l_reserve_head);
1327 xlog_grant_head_init(&log->l_write_head);
1328
1329 error = -EFSCORRUPTED;
1330 if (xfs_sb_version_hassector(&mp->m_sb)) {
1331 log2_size = mp->m_sb.sb_logsectlog;
1332 if (log2_size < BBSHIFT) {
1333 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1334 log2_size, BBSHIFT);
1335 goto out_free_log;
1336 }
1337
1338 log2_size -= BBSHIFT;
1339 if (log2_size > mp->m_sectbb_log) {
1340 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1341 log2_size, mp->m_sectbb_log);
1342 goto out_free_log;
1343 }
1344
1345 /* for larger sector sizes, must have v2 or external log */
1346 if (log2_size && log->l_logBBstart > 0 &&
1347 !xfs_sb_version_haslogv2(&mp->m_sb)) {
1348 xfs_warn(mp,
1349 "log sector size (0x%x) invalid for configuration.",
1350 log2_size);
1351 goto out_free_log;
1352 }
1353 }
1354 log->l_sectBBsize = 1 << log2_size;
1355
1356 xlog_get_iclog_buffer_size(mp, log);
1357
1358 spin_lock_init(&log->l_icloglock);
1359 init_waitqueue_head(&log->l_flush_wait);
1360
1361 iclogp = &log->l_iclog;
1362 /*
1363 * The amount of memory to allocate for the iclog structure is
1364 * rather funky due to the way the structure is defined. It is
1365 * done this way so that we can use different sizes for machines
1366 * with different amounts of memory. See the definition of
1367 * xlog_in_core_t in xfs_log_priv.h for details.
1368 */
1369 ASSERT(log->l_iclog_size >= 4096);
1370 for (i = 0; i < log->l_iclog_bufs; i++) {
1371 int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
1372 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1373 sizeof(struct bio_vec);
1374
1375 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1376 if (!iclog)
1377 goto out_free_iclog;
1378
1379 *iclogp = iclog;
1380 iclog->ic_prev = prev_iclog;
1381 prev_iclog = iclog;
1382
1383 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
1384 KM_MAYFAIL | KM_ZERO);
1385 if (!iclog->ic_data)
1386 goto out_free_iclog;
1387#ifdef DEBUG
1388 log->l_iclog_bak[i] = &iclog->ic_header;
1389#endif
1390 head = &iclog->ic_header;
1391 memset(head, 0, sizeof(xlog_rec_header_t));
1392 head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1393 head->h_version = cpu_to_be32(
1394 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1395 head->h_size = cpu_to_be32(log->l_iclog_size);
1396 /* new fields */
1397 head->h_fmt = cpu_to_be32(XLOG_FMT);
1398 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1399
1400 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1401 iclog->ic_state = XLOG_STATE_ACTIVE;
1402 iclog->ic_log = log;
1403 atomic_set(&iclog->ic_refcnt, 0);
1404 spin_lock_init(&iclog->ic_callback_lock);
1405 INIT_LIST_HEAD(&iclog->ic_callbacks);
1406 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1407
1408 init_waitqueue_head(&iclog->ic_force_wait);
1409 init_waitqueue_head(&iclog->ic_write_wait);
1410 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1411 sema_init(&iclog->ic_sema, 1);
1412
1413 iclogp = &iclog->ic_next;
1414 }
1415 *iclogp = log->l_iclog; /* complete ring */
1416 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1417
1418 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1419 WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
1420 mp->m_super->s_id);
1421 if (!log->l_ioend_workqueue)
1422 goto out_free_iclog;
1423
1424 error = xlog_cil_init(log);
1425 if (error)
1426 goto out_destroy_workqueue;
1427 return log;
1428
1429out_destroy_workqueue:
1430 destroy_workqueue(log->l_ioend_workqueue);
1431out_free_iclog:
1432 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1433 prev_iclog = iclog->ic_next;
1434 kmem_free(iclog->ic_data);
1435 kmem_free(iclog);
1436 if (prev_iclog == log->l_iclog)
1437 break;
1438 }
1439out_free_log:
1440 kmem_free(log);
1441out:
1442 return ERR_PTR(error);
1443} /* xlog_alloc_log */
1444
1445/*
1446 * Write out the commit record of a transaction associated with the given
1447 * ticket to close off a running log write. Return the lsn of the commit record.
1448 */
1449int
1450xlog_commit_record(
1451 struct xlog *log,
1452 struct xlog_ticket *ticket,
1453 struct xlog_in_core **iclog,
1454 xfs_lsn_t *lsn)
1455{
1456 struct xfs_log_iovec reg = {
1457 .i_addr = NULL,
1458 .i_len = 0,
1459 .i_type = XLOG_REG_TYPE_COMMIT,
1460 };
1461 struct xfs_log_vec vec = {
1462 .lv_niovecs = 1,
1463 .lv_iovecp = ®,
1464 };
1465 int error;
1466
1467 if (XLOG_FORCED_SHUTDOWN(log))
1468 return -EIO;
1469
1470 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS,
1471 false);
1472 if (error)
1473 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
1474 return error;
1475}
1476
1477/*
1478 * Push on the buffer cache code if we ever use more than 75% of the on-disk
1479 * log space. This code pushes on the lsn which would supposedly free up
1480 * the 25% which we want to leave free. We may need to adopt a policy which
1481 * pushes on an lsn which is further along in the log once we reach the high
1482 * water mark. In this manner, we would be creating a low water mark.
1483 */
1484STATIC void
1485xlog_grant_push_ail(
1486 struct xlog *log,
1487 int need_bytes)
1488{
1489 xfs_lsn_t threshold_lsn = 0;
1490 xfs_lsn_t last_sync_lsn;
1491 int free_blocks;
1492 int free_bytes;
1493 int threshold_block;
1494 int threshold_cycle;
1495 int free_threshold;
1496
1497 ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1498
1499 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1500 free_blocks = BTOBBT(free_bytes);
1501
1502 /*
1503 * Set the threshold for the minimum number of free blocks in the
1504 * log to the maximum of what the caller needs, one quarter of the
1505 * log, and 256 blocks.
1506 */
1507 free_threshold = BTOBB(need_bytes);
1508 free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1509 free_threshold = max(free_threshold, 256);
1510 if (free_blocks >= free_threshold)
1511 return;
1512
1513 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1514 &threshold_block);
1515 threshold_block += free_threshold;
1516 if (threshold_block >= log->l_logBBsize) {
1517 threshold_block -= log->l_logBBsize;
1518 threshold_cycle += 1;
1519 }
1520 threshold_lsn = xlog_assign_lsn(threshold_cycle,
1521 threshold_block);
1522 /*
1523 * Don't pass in an lsn greater than the lsn of the last
1524 * log record known to be on disk. Use a snapshot of the last sync lsn
1525 * so that it doesn't change between the compare and the set.
1526 */
1527 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
1528 if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1529 threshold_lsn = last_sync_lsn;
1530
1531 /*
1532 * Get the transaction layer to kick the dirty buffers out to
1533 * disk asynchronously. No point in trying to do this if
1534 * the filesystem is shutting down.
1535 */
1536 if (!XLOG_FORCED_SHUTDOWN(log))
1537 xfs_ail_push(log->l_ailp, threshold_lsn);
1538}
1539
1540/*
1541 * Stamp cycle number in every block
1542 */
1543STATIC void
1544xlog_pack_data(
1545 struct xlog *log,
1546 struct xlog_in_core *iclog,
1547 int roundoff)
1548{
1549 int i, j, k;
1550 int size = iclog->ic_offset + roundoff;
1551 __be32 cycle_lsn;
1552 char *dp;
1553
1554 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1555
1556 dp = iclog->ic_datap;
1557 for (i = 0; i < BTOBB(size); i++) {
1558 if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
1559 break;
1560 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1561 *(__be32 *)dp = cycle_lsn;
1562 dp += BBSIZE;
1563 }
1564
1565 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1566 xlog_in_core_2_t *xhdr = iclog->ic_data;
1567
1568 for ( ; i < BTOBB(size); i++) {
1569 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1570 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
1571 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
1572 *(__be32 *)dp = cycle_lsn;
1573 dp += BBSIZE;
1574 }
1575
1576 for (i = 1; i < log->l_iclog_heads; i++)
1577 xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
1578 }
1579}
1580
1581/*
1582 * Calculate the checksum for a log buffer.
1583 *
1584 * This is a little more complicated than it should be because the various
1585 * headers and the actual data are non-contiguous.
1586 */
1587__le32
1588xlog_cksum(
1589 struct xlog *log,
1590 struct xlog_rec_header *rhead,
1591 char *dp,
1592 int size)
1593{
1594 uint32_t crc;
1595
1596 /* first generate the crc for the record header ... */
1597 crc = xfs_start_cksum_update((char *)rhead,
1598 sizeof(struct xlog_rec_header),
1599 offsetof(struct xlog_rec_header, h_crc));
1600
1601 /* ... then for additional cycle data for v2 logs ... */
1602 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1603 union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
1604 int i;
1605 int xheads;
1606
1607 xheads = size / XLOG_HEADER_CYCLE_SIZE;
1608 if (size % XLOG_HEADER_CYCLE_SIZE)
1609 xheads++;
1610
1611 for (i = 1; i < xheads; i++) {
1612 crc = crc32c(crc, &xhdr[i].hic_xheader,
1613 sizeof(struct xlog_rec_ext_header));
1614 }
1615 }
1616
1617 /* ... and finally for the payload */
1618 crc = crc32c(crc, dp, size);
1619
1620 return xfs_end_cksum(crc);
1621}
1622
1623static void
1624xlog_bio_end_io(
1625 struct bio *bio)
1626{
1627 struct xlog_in_core *iclog = bio->bi_private;
1628
1629 queue_work(iclog->ic_log->l_ioend_workqueue,
1630 &iclog->ic_end_io_work);
1631}
1632
1633static int
1634xlog_map_iclog_data(
1635 struct bio *bio,
1636 void *data,
1637 size_t count)
1638{
1639 do {
1640 struct page *page = kmem_to_page(data);
1641 unsigned int off = offset_in_page(data);
1642 size_t len = min_t(size_t, count, PAGE_SIZE - off);
1643
1644 if (bio_add_page(bio, page, len, off) != len)
1645 return -EIO;
1646
1647 data += len;
1648 count -= len;
1649 } while (count);
1650
1651 return 0;
1652}
1653
1654STATIC void
1655xlog_write_iclog(
1656 struct xlog *log,
1657 struct xlog_in_core *iclog,
1658 uint64_t bno,
1659 unsigned int count,
1660 bool need_flush)
1661{
1662 ASSERT(bno < log->l_logBBsize);
1663
1664 /*
1665 * We lock the iclogbufs here so that we can serialise against I/O
1666 * completion during unmount. We might be processing a shutdown
1667 * triggered during unmount, and that can occur asynchronously to the
1668 * unmount thread, and hence we need to ensure that completes before
1669 * tearing down the iclogbufs. Hence we need to hold the buffer lock
1670 * across the log IO to archieve that.
1671 */
1672 down(&iclog->ic_sema);
1673 if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
1674 /*
1675 * It would seem logical to return EIO here, but we rely on
1676 * the log state machine to propagate I/O errors instead of
1677 * doing it here. We kick of the state machine and unlock
1678 * the buffer manually, the code needs to be kept in sync
1679 * with the I/O completion path.
1680 */
1681 xlog_state_done_syncing(iclog);
1682 up(&iclog->ic_sema);
1683 return;
1684 }
1685
1686 bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
1687 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
1688 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1689 iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1690 iclog->ic_bio.bi_private = iclog;
1691
1692 /*
1693 * We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more
1694 * IOs coming immediately after this one. This prevents the block layer
1695 * writeback throttle from throttling log writes behind background
1696 * metadata writeback and causing priority inversions.
1697 */
1698 iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC |
1699 REQ_IDLE | REQ_FUA;
1700 if (need_flush)
1701 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1702
1703 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1704 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
1705 return;
1706 }
1707 if (is_vmalloc_addr(iclog->ic_data))
1708 flush_kernel_vmap_range(iclog->ic_data, count);
1709
1710 /*
1711 * If this log buffer would straddle the end of the log we will have
1712 * to split it up into two bios, so that we can continue at the start.
1713 */
1714 if (bno + BTOBB(count) > log->l_logBBsize) {
1715 struct bio *split;
1716
1717 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1718 GFP_NOIO, &fs_bio_set);
1719 bio_chain(split, &iclog->ic_bio);
1720 submit_bio(split);
1721
1722 /* restart at logical offset zero for the remainder */
1723 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1724 }
1725
1726 submit_bio(&iclog->ic_bio);
1727}
1728
1729/*
1730 * We need to bump cycle number for the part of the iclog that is
1731 * written to the start of the log. Watch out for the header magic
1732 * number case, though.
1733 */
1734static void
1735xlog_split_iclog(
1736 struct xlog *log,
1737 void *data,
1738 uint64_t bno,
1739 unsigned int count)
1740{
1741 unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
1742 unsigned int i;
1743
1744 for (i = split_offset; i < count; i += BBSIZE) {
1745 uint32_t cycle = get_unaligned_be32(data + i);
1746
1747 if (++cycle == XLOG_HEADER_MAGIC_NUM)
1748 cycle++;
1749 put_unaligned_be32(cycle, data + i);
1750 }
1751}
1752
1753static int
1754xlog_calc_iclog_size(
1755 struct xlog *log,
1756 struct xlog_in_core *iclog,
1757 uint32_t *roundoff)
1758{
1759 uint32_t count_init, count;
1760 bool use_lsunit;
1761
1762 use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
1763 log->l_mp->m_sb.sb_logsunit > 1;
1764
1765 /* Add for LR header */
1766 count_init = log->l_iclog_hsize + iclog->ic_offset;
1767
1768 /* Round out the log write size */
1769 if (use_lsunit) {
1770 /* we have a v2 stripe unit to use */
1771 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1772 } else {
1773 count = BBTOB(BTOBB(count_init));
1774 }
1775
1776 ASSERT(count >= count_init);
1777 *roundoff = count - count_init;
1778
1779 if (use_lsunit)
1780 ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
1781 else
1782 ASSERT(*roundoff < BBTOB(1));
1783 return count;
1784}
1785
1786/*
1787 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1788 * fashion. Previously, we should have moved the current iclog
1789 * ptr in the log to point to the next available iclog. This allows further
1790 * write to continue while this code syncs out an iclog ready to go.
1791 * Before an in-core log can be written out, the data section must be scanned
1792 * to save away the 1st word of each BBSIZE block into the header. We replace
1793 * it with the current cycle count. Each BBSIZE block is tagged with the
1794 * cycle count because there in an implicit assumption that drives will
1795 * guarantee that entire 512 byte blocks get written at once. In other words,
1796 * we can't have part of a 512 byte block written and part not written. By
1797 * tagging each block, we will know which blocks are valid when recovering
1798 * after an unclean shutdown.
1799 *
1800 * This routine is single threaded on the iclog. No other thread can be in
1801 * this routine with the same iclog. Changing contents of iclog can there-
1802 * fore be done without grabbing the state machine lock. Updating the global
1803 * log will require grabbing the lock though.
1804 *
1805 * The entire log manager uses a logical block numbering scheme. Only
1806 * xlog_write_iclog knows about the fact that the log may not start with
1807 * block zero on a given device.
1808 */
1809STATIC void
1810xlog_sync(
1811 struct xlog *log,
1812 struct xlog_in_core *iclog)
1813{
1814 unsigned int count; /* byte count of bwrite */
1815 unsigned int roundoff; /* roundoff to BB or stripe */
1816 uint64_t bno;
1817 unsigned int size;
1818 bool need_flush = true, split = false;
1819
1820 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1821
1822 count = xlog_calc_iclog_size(log, iclog, &roundoff);
1823
1824 /* move grant heads by roundoff in sync */
1825 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1826 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1827
1828 /* put cycle number in every block */
1829 xlog_pack_data(log, iclog, roundoff);
1830
1831 /* real byte length */
1832 size = iclog->ic_offset;
1833 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb))
1834 size += roundoff;
1835 iclog->ic_header.h_len = cpu_to_be32(size);
1836
1837 XFS_STATS_INC(log->l_mp, xs_log_writes);
1838 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
1839
1840 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
1841
1842 /* Do we need to split this write into 2 parts? */
1843 if (bno + BTOBB(count) > log->l_logBBsize) {
1844 xlog_split_iclog(log, &iclog->ic_header, bno, count);
1845 split = true;
1846 }
1847
1848 /* calculcate the checksum */
1849 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1850 iclog->ic_datap, size);
1851 /*
1852 * Intentionally corrupt the log record CRC based on the error injection
1853 * frequency, if defined. This facilitates testing log recovery in the
1854 * event of torn writes. Hence, set the IOABORT state to abort the log
1855 * write on I/O completion and shutdown the fs. The subsequent mount
1856 * detects the bad CRC and attempts to recover.
1857 */
1858#ifdef DEBUG
1859 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
1860 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
1861 iclog->ic_fail_crc = true;
1862 xfs_warn(log->l_mp,
1863 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
1864 be64_to_cpu(iclog->ic_header.h_lsn));
1865 }
1866#endif
1867
1868 /*
1869 * Flush the data device before flushing the log to make sure all meta
1870 * data written back from the AIL actually made it to disk before
1871 * stamping the new log tail LSN into the log buffer. For an external
1872 * log we need to issue the flush explicitly, and unfortunately
1873 * synchronously here; for an internal log we can simply use the block
1874 * layer state machine for preflushes.
1875 */
1876 if (log->l_targ != log->l_mp->m_ddev_targp || split) {
1877 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1878 need_flush = false;
1879 }
1880
1881 xlog_verify_iclog(log, iclog, count);
1882 xlog_write_iclog(log, iclog, bno, count, need_flush);
1883}
1884
1885/*
1886 * Deallocate a log structure
1887 */
1888STATIC void
1889xlog_dealloc_log(
1890 struct xlog *log)
1891{
1892 xlog_in_core_t *iclog, *next_iclog;
1893 int i;
1894
1895 xlog_cil_destroy(log);
1896
1897 /*
1898 * Cycle all the iclogbuf locks to make sure all log IO completion
1899 * is done before we tear down these buffers.
1900 */
1901 iclog = log->l_iclog;
1902 for (i = 0; i < log->l_iclog_bufs; i++) {
1903 down(&iclog->ic_sema);
1904 up(&iclog->ic_sema);
1905 iclog = iclog->ic_next;
1906 }
1907
1908 iclog = log->l_iclog;
1909 for (i = 0; i < log->l_iclog_bufs; i++) {
1910 next_iclog = iclog->ic_next;
1911 kmem_free(iclog->ic_data);
1912 kmem_free(iclog);
1913 iclog = next_iclog;
1914 }
1915
1916 log->l_mp->m_log = NULL;
1917 destroy_workqueue(log->l_ioend_workqueue);
1918 kmem_free(log);
1919}
1920
1921/*
1922 * Update counters atomically now that memcpy is done.
1923 */
1924static inline void
1925xlog_state_finish_copy(
1926 struct xlog *log,
1927 struct xlog_in_core *iclog,
1928 int record_cnt,
1929 int copy_bytes)
1930{
1931 lockdep_assert_held(&log->l_icloglock);
1932
1933 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1934 iclog->ic_offset += copy_bytes;
1935}
1936
1937/*
1938 * print out info relating to regions written which consume
1939 * the reservation
1940 */
1941void
1942xlog_print_tic_res(
1943 struct xfs_mount *mp,
1944 struct xlog_ticket *ticket)
1945{
1946 uint i;
1947 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1948
1949 /* match with XLOG_REG_TYPE_* in xfs_log.h */
1950#define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str
1951 static char *res_type_str[] = {
1952 REG_TYPE_STR(BFORMAT, "bformat"),
1953 REG_TYPE_STR(BCHUNK, "bchunk"),
1954 REG_TYPE_STR(EFI_FORMAT, "efi_format"),
1955 REG_TYPE_STR(EFD_FORMAT, "efd_format"),
1956 REG_TYPE_STR(IFORMAT, "iformat"),
1957 REG_TYPE_STR(ICORE, "icore"),
1958 REG_TYPE_STR(IEXT, "iext"),
1959 REG_TYPE_STR(IBROOT, "ibroot"),
1960 REG_TYPE_STR(ILOCAL, "ilocal"),
1961 REG_TYPE_STR(IATTR_EXT, "iattr_ext"),
1962 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"),
1963 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"),
1964 REG_TYPE_STR(QFORMAT, "qformat"),
1965 REG_TYPE_STR(DQUOT, "dquot"),
1966 REG_TYPE_STR(QUOTAOFF, "quotaoff"),
1967 REG_TYPE_STR(LRHEADER, "LR header"),
1968 REG_TYPE_STR(UNMOUNT, "unmount"),
1969 REG_TYPE_STR(COMMIT, "commit"),
1970 REG_TYPE_STR(TRANSHDR, "trans header"),
1971 REG_TYPE_STR(ICREATE, "inode create"),
1972 REG_TYPE_STR(RUI_FORMAT, "rui_format"),
1973 REG_TYPE_STR(RUD_FORMAT, "rud_format"),
1974 REG_TYPE_STR(CUI_FORMAT, "cui_format"),
1975 REG_TYPE_STR(CUD_FORMAT, "cud_format"),
1976 REG_TYPE_STR(BUI_FORMAT, "bui_format"),
1977 REG_TYPE_STR(BUD_FORMAT, "bud_format"),
1978 };
1979 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1);
1980#undef REG_TYPE_STR
1981
1982 xfs_warn(mp, "ticket reservation summary:");
1983 xfs_warn(mp, " unit res = %d bytes",
1984 ticket->t_unit_res);
1985 xfs_warn(mp, " current res = %d bytes",
1986 ticket->t_curr_res);
1987 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)",
1988 ticket->t_res_arr_sum, ticket->t_res_o_flow);
1989 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)",
1990 ticket->t_res_num_ophdrs, ophdr_spc);
1991 xfs_warn(mp, " ophdr + reg = %u bytes",
1992 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc);
1993 xfs_warn(mp, " num regions = %u",
1994 ticket->t_res_num);
1995
1996 for (i = 0; i < ticket->t_res_num; i++) {
1997 uint r_type = ticket->t_res_arr[i].r_type;
1998 xfs_warn(mp, "region[%u]: %s - %u bytes", i,
1999 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
2000 "bad-rtype" : res_type_str[r_type]),
2001 ticket->t_res_arr[i].r_len);
2002 }
2003}
2004
2005/*
2006 * Print a summary of the transaction.
2007 */
2008void
2009xlog_print_trans(
2010 struct xfs_trans *tp)
2011{
2012 struct xfs_mount *mp = tp->t_mountp;
2013 struct xfs_log_item *lip;
2014
2015 /* dump core transaction and ticket info */
2016 xfs_warn(mp, "transaction summary:");
2017 xfs_warn(mp, " log res = %d", tp->t_log_res);
2018 xfs_warn(mp, " log count = %d", tp->t_log_count);
2019 xfs_warn(mp, " flags = 0x%x", tp->t_flags);
2020
2021 xlog_print_tic_res(mp, tp->t_ticket);
2022
2023 /* dump each log item */
2024 list_for_each_entry(lip, &tp->t_items, li_trans) {
2025 struct xfs_log_vec *lv = lip->li_lv;
2026 struct xfs_log_iovec *vec;
2027 int i;
2028
2029 xfs_warn(mp, "log item: ");
2030 xfs_warn(mp, " type = 0x%x", lip->li_type);
2031 xfs_warn(mp, " flags = 0x%lx", lip->li_flags);
2032 if (!lv)
2033 continue;
2034 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
2035 xfs_warn(mp, " size = %d", lv->lv_size);
2036 xfs_warn(mp, " bytes = %d", lv->lv_bytes);
2037 xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
2038
2039 /* dump each iovec for the log item */
2040 vec = lv->lv_iovecp;
2041 for (i = 0; i < lv->lv_niovecs; i++) {
2042 int dumplen = min(vec->i_len, 32);
2043
2044 xfs_warn(mp, " iovec[%d]", i);
2045 xfs_warn(mp, " type = 0x%x", vec->i_type);
2046 xfs_warn(mp, " len = %d", vec->i_len);
2047 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
2048 xfs_hex_dump(vec->i_addr, dumplen);
2049
2050 vec++;
2051 }
2052 }
2053}
2054
2055/*
2056 * Calculate the potential space needed by the log vector. We may need a start
2057 * record, and each region gets its own struct xlog_op_header and may need to be
2058 * double word aligned.
2059 */
2060static int
2061xlog_write_calc_vec_length(
2062 struct xlog_ticket *ticket,
2063 struct xfs_log_vec *log_vector,
2064 bool need_start_rec)
2065{
2066 struct xfs_log_vec *lv;
2067 int headers = need_start_rec ? 1 : 0;
2068 int len = 0;
2069 int i;
2070
2071 for (lv = log_vector; lv; lv = lv->lv_next) {
2072 /* we don't write ordered log vectors */
2073 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2074 continue;
2075
2076 headers += lv->lv_niovecs;
2077
2078 for (i = 0; i < lv->lv_niovecs; i++) {
2079 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i];
2080
2081 len += vecp->i_len;
2082 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2083 }
2084 }
2085
2086 ticket->t_res_num_ophdrs += headers;
2087 len += headers * sizeof(struct xlog_op_header);
2088
2089 return len;
2090}
2091
2092static void
2093xlog_write_start_rec(
2094 struct xlog_op_header *ophdr,
2095 struct xlog_ticket *ticket)
2096{
2097 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2098 ophdr->oh_clientid = ticket->t_clientid;
2099 ophdr->oh_len = 0;
2100 ophdr->oh_flags = XLOG_START_TRANS;
2101 ophdr->oh_res2 = 0;
2102}
2103
2104static xlog_op_header_t *
2105xlog_write_setup_ophdr(
2106 struct xlog *log,
2107 struct xlog_op_header *ophdr,
2108 struct xlog_ticket *ticket,
2109 uint flags)
2110{
2111 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2112 ophdr->oh_clientid = ticket->t_clientid;
2113 ophdr->oh_res2 = 0;
2114
2115 /* are we copying a commit or unmount record? */
2116 ophdr->oh_flags = flags;
2117
2118 /*
2119 * We've seen logs corrupted with bad transaction client ids. This
2120 * makes sure that XFS doesn't generate them on. Turn this into an EIO
2121 * and shut down the filesystem.
2122 */
2123 switch (ophdr->oh_clientid) {
2124 case XFS_TRANSACTION:
2125 case XFS_VOLUME:
2126 case XFS_LOG:
2127 break;
2128 default:
2129 xfs_warn(log->l_mp,
2130 "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
2131 ophdr->oh_clientid, ticket);
2132 return NULL;
2133 }
2134
2135 return ophdr;
2136}
2137
2138/*
2139 * Set up the parameters of the region copy into the log. This has
2140 * to handle region write split across multiple log buffers - this
2141 * state is kept external to this function so that this code can
2142 * be written in an obvious, self documenting manner.
2143 */
2144static int
2145xlog_write_setup_copy(
2146 struct xlog_ticket *ticket,
2147 struct xlog_op_header *ophdr,
2148 int space_available,
2149 int space_required,
2150 int *copy_off,
2151 int *copy_len,
2152 int *last_was_partial_copy,
2153 int *bytes_consumed)
2154{
2155 int still_to_copy;
2156
2157 still_to_copy = space_required - *bytes_consumed;
2158 *copy_off = *bytes_consumed;
2159
2160 if (still_to_copy <= space_available) {
2161 /* write of region completes here */
2162 *copy_len = still_to_copy;
2163 ophdr->oh_len = cpu_to_be32(*copy_len);
2164 if (*last_was_partial_copy)
2165 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2166 *last_was_partial_copy = 0;
2167 *bytes_consumed = 0;
2168 return 0;
2169 }
2170
2171 /* partial write of region, needs extra log op header reservation */
2172 *copy_len = space_available;
2173 ophdr->oh_len = cpu_to_be32(*copy_len);
2174 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2175 if (*last_was_partial_copy)
2176 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2177 *bytes_consumed += *copy_len;
2178 (*last_was_partial_copy)++;
2179
2180 /* account for new log op header */
2181 ticket->t_curr_res -= sizeof(struct xlog_op_header);
2182 ticket->t_res_num_ophdrs++;
2183
2184 return sizeof(struct xlog_op_header);
2185}
2186
2187static int
2188xlog_write_copy_finish(
2189 struct xlog *log,
2190 struct xlog_in_core *iclog,
2191 uint flags,
2192 int *record_cnt,
2193 int *data_cnt,
2194 int *partial_copy,
2195 int *partial_copy_len,
2196 int log_offset,
2197 struct xlog_in_core **commit_iclog)
2198{
2199 int error;
2200
2201 if (*partial_copy) {
2202 /*
2203 * This iclog has already been marked WANT_SYNC by
2204 * xlog_state_get_iclog_space.
2205 */
2206 spin_lock(&log->l_icloglock);
2207 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2208 *record_cnt = 0;
2209 *data_cnt = 0;
2210 goto release_iclog;
2211 }
2212
2213 *partial_copy = 0;
2214 *partial_copy_len = 0;
2215
2216 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2217 /* no more space in this iclog - push it. */
2218 spin_lock(&log->l_icloglock);
2219 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2220 *record_cnt = 0;
2221 *data_cnt = 0;
2222
2223 if (iclog->ic_state == XLOG_STATE_ACTIVE)
2224 xlog_state_switch_iclogs(log, iclog, 0);
2225 else
2226 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2227 iclog->ic_state == XLOG_STATE_IOERROR);
2228 if (!commit_iclog)
2229 goto release_iclog;
2230 spin_unlock(&log->l_icloglock);
2231 ASSERT(flags & XLOG_COMMIT_TRANS);
2232 *commit_iclog = iclog;
2233 }
2234
2235 return 0;
2236
2237release_iclog:
2238 error = xlog_state_release_iclog(log, iclog);
2239 spin_unlock(&log->l_icloglock);
2240 return error;
2241}
2242
2243/*
2244 * Write some region out to in-core log
2245 *
2246 * This will be called when writing externally provided regions or when
2247 * writing out a commit record for a given transaction.
2248 *
2249 * General algorithm:
2250 * 1. Find total length of this write. This may include adding to the
2251 * lengths passed in.
2252 * 2. Check whether we violate the tickets reservation.
2253 * 3. While writing to this iclog
2254 * A. Reserve as much space in this iclog as can get
2255 * B. If this is first write, save away start lsn
2256 * C. While writing this region:
2257 * 1. If first write of transaction, write start record
2258 * 2. Write log operation header (header per region)
2259 * 3. Find out if we can fit entire region into this iclog
2260 * 4. Potentially, verify destination memcpy ptr
2261 * 5. Memcpy (partial) region
2262 * 6. If partial copy, release iclog; otherwise, continue
2263 * copying more regions into current iclog
2264 * 4. Mark want sync bit (in simulation mode)
2265 * 5. Release iclog for potential flush to on-disk log.
2266 *
2267 * ERRORS:
2268 * 1. Panic if reservation is overrun. This should never happen since
2269 * reservation amounts are generated internal to the filesystem.
2270 * NOTES:
2271 * 1. Tickets are single threaded data structures.
2272 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2273 * syncing routine. When a single log_write region needs to span
2274 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2275 * on all log operation writes which don't contain the end of the
2276 * region. The XLOG_END_TRANS bit is used for the in-core log
2277 * operation which contains the end of the continued log_write region.
2278 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2279 * we don't really know exactly how much space will be used. As a result,
2280 * we don't update ic_offset until the end when we know exactly how many
2281 * bytes have been written out.
2282 */
2283int
2284xlog_write(
2285 struct xlog *log,
2286 struct xfs_log_vec *log_vector,
2287 struct xlog_ticket *ticket,
2288 xfs_lsn_t *start_lsn,
2289 struct xlog_in_core **commit_iclog,
2290 uint flags,
2291 bool need_start_rec)
2292{
2293 struct xlog_in_core *iclog = NULL;
2294 struct xfs_log_vec *lv = log_vector;
2295 struct xfs_log_iovec *vecp = lv->lv_iovecp;
2296 int index = 0;
2297 int len;
2298 int partial_copy = 0;
2299 int partial_copy_len = 0;
2300 int contwr = 0;
2301 int record_cnt = 0;
2302 int data_cnt = 0;
2303 int error = 0;
2304
2305 /*
2306 * If this is a commit or unmount transaction, we don't need a start
2307 * record to be written. We do, however, have to account for the
2308 * commit or unmount header that gets written. Hence we always have
2309 * to account for an extra xlog_op_header here.
2310 */
2311 ticket->t_curr_res -= sizeof(struct xlog_op_header);
2312 if (ticket->t_curr_res < 0) {
2313 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2314 "ctx ticket reservation ran out. Need to up reservation");
2315 xlog_print_tic_res(log->l_mp, ticket);
2316 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
2317 }
2318
2319 len = xlog_write_calc_vec_length(ticket, log_vector, need_start_rec);
2320 *start_lsn = 0;
2321 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2322 void *ptr;
2323 int log_offset;
2324
2325 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2326 &contwr, &log_offset);
2327 if (error)
2328 return error;
2329
2330 ASSERT(log_offset <= iclog->ic_size - 1);
2331 ptr = iclog->ic_datap + log_offset;
2332
2333 /* start_lsn is the first lsn written to. That's all we need. */
2334 if (!*start_lsn)
2335 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2336
2337 /*
2338 * This loop writes out as many regions as can fit in the amount
2339 * of space which was allocated by xlog_state_get_iclog_space().
2340 */
2341 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2342 struct xfs_log_iovec *reg;
2343 struct xlog_op_header *ophdr;
2344 int copy_len;
2345 int copy_off;
2346 bool ordered = false;
2347
2348 /* ordered log vectors have no regions to write */
2349 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2350 ASSERT(lv->lv_niovecs == 0);
2351 ordered = true;
2352 goto next_lv;
2353 }
2354
2355 reg = &vecp[index];
2356 ASSERT(reg->i_len % sizeof(int32_t) == 0);
2357 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
2358
2359 /*
2360 * Before we start formatting log vectors, we need to
2361 * write a start record. Only do this for the first
2362 * iclog we write to.
2363 */
2364 if (need_start_rec) {
2365 xlog_write_start_rec(ptr, ticket);
2366 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2367 sizeof(struct xlog_op_header));
2368 }
2369
2370 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2371 if (!ophdr)
2372 return -EIO;
2373
2374 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2375 sizeof(struct xlog_op_header));
2376
2377 len += xlog_write_setup_copy(ticket, ophdr,
2378 iclog->ic_size-log_offset,
2379 reg->i_len,
2380 ©_off, ©_len,
2381 &partial_copy,
2382 &partial_copy_len);
2383 xlog_verify_dest_ptr(log, ptr);
2384
2385 /*
2386 * Copy region.
2387 *
2388 * Unmount records just log an opheader, so can have
2389 * empty payloads with no data region to copy. Hence we
2390 * only copy the payload if the vector says it has data
2391 * to copy.
2392 */
2393 ASSERT(copy_len >= 0);
2394 if (copy_len > 0) {
2395 memcpy(ptr, reg->i_addr + copy_off, copy_len);
2396 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2397 copy_len);
2398 }
2399 copy_len += sizeof(struct xlog_op_header);
2400 record_cnt++;
2401 if (need_start_rec) {
2402 copy_len += sizeof(struct xlog_op_header);
2403 record_cnt++;
2404 need_start_rec = false;
2405 }
2406 data_cnt += contwr ? copy_len : 0;
2407
2408 error = xlog_write_copy_finish(log, iclog, flags,
2409 &record_cnt, &data_cnt,
2410 &partial_copy,
2411 &partial_copy_len,
2412 log_offset,
2413 commit_iclog);
2414 if (error)
2415 return error;
2416
2417 /*
2418 * if we had a partial copy, we need to get more iclog
2419 * space but we don't want to increment the region
2420 * index because there is still more is this region to
2421 * write.
2422 *
2423 * If we completed writing this region, and we flushed
2424 * the iclog (indicated by resetting of the record
2425 * count), then we also need to get more log space. If
2426 * this was the last record, though, we are done and
2427 * can just return.
2428 */
2429 if (partial_copy)
2430 break;
2431
2432 if (++index == lv->lv_niovecs) {
2433next_lv:
2434 lv = lv->lv_next;
2435 index = 0;
2436 if (lv)
2437 vecp = lv->lv_iovecp;
2438 }
2439 if (record_cnt == 0 && !ordered) {
2440 if (!lv)
2441 return 0;
2442 break;
2443 }
2444 }
2445 }
2446
2447 ASSERT(len == 0);
2448
2449 spin_lock(&log->l_icloglock);
2450 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2451 if (commit_iclog) {
2452 ASSERT(flags & XLOG_COMMIT_TRANS);
2453 *commit_iclog = iclog;
2454 } else {
2455 error = xlog_state_release_iclog(log, iclog);
2456 }
2457 spin_unlock(&log->l_icloglock);
2458
2459 return error;
2460}
2461
2462static void
2463xlog_state_activate_iclog(
2464 struct xlog_in_core *iclog,
2465 int *iclogs_changed)
2466{
2467 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2468
2469 /*
2470 * If the number of ops in this iclog indicate it just contains the
2471 * dummy transaction, we can change state into IDLE (the second time
2472 * around). Otherwise we should change the state into NEED a dummy.
2473 * We don't need to cover the dummy.
2474 */
2475 if (*iclogs_changed == 0 &&
2476 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2477 *iclogs_changed = 1;
2478 } else {
2479 /*
2480 * We have two dirty iclogs so start over. This could also be
2481 * num of ops indicating this is not the dummy going out.
2482 */
2483 *iclogs_changed = 2;
2484 }
2485
2486 iclog->ic_state = XLOG_STATE_ACTIVE;
2487 iclog->ic_offset = 0;
2488 iclog->ic_header.h_num_logops = 0;
2489 memset(iclog->ic_header.h_cycle_data, 0,
2490 sizeof(iclog->ic_header.h_cycle_data));
2491 iclog->ic_header.h_lsn = 0;
2492}
2493
2494/*
2495 * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2496 * ACTIVE after iclog I/O has completed.
2497 */
2498static void
2499xlog_state_activate_iclogs(
2500 struct xlog *log,
2501 int *iclogs_changed)
2502{
2503 struct xlog_in_core *iclog = log->l_iclog;
2504
2505 do {
2506 if (iclog->ic_state == XLOG_STATE_DIRTY)
2507 xlog_state_activate_iclog(iclog, iclogs_changed);
2508 /*
2509 * The ordering of marking iclogs ACTIVE must be maintained, so
2510 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2511 */
2512 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2513 break;
2514 } while ((iclog = iclog->ic_next) != log->l_iclog);
2515}
2516
2517static int
2518xlog_covered_state(
2519 int prev_state,
2520 int iclogs_changed)
2521{
2522 /*
2523 * We usually go to NEED. But we go to NEED2 if the changed indicates we
2524 * are done writing the dummy record. If we are done with the second
2525 * dummy recored (DONE2), then we go to IDLE.
2526 */
2527 switch (prev_state) {
2528 case XLOG_STATE_COVER_IDLE:
2529 case XLOG_STATE_COVER_NEED:
2530 case XLOG_STATE_COVER_NEED2:
2531 break;
2532 case XLOG_STATE_COVER_DONE:
2533 if (iclogs_changed == 1)
2534 return XLOG_STATE_COVER_NEED2;
2535 break;
2536 case XLOG_STATE_COVER_DONE2:
2537 if (iclogs_changed == 1)
2538 return XLOG_STATE_COVER_IDLE;
2539 break;
2540 default:
2541 ASSERT(0);
2542 }
2543
2544 return XLOG_STATE_COVER_NEED;
2545}
2546
2547STATIC void
2548xlog_state_clean_iclog(
2549 struct xlog *log,
2550 struct xlog_in_core *dirty_iclog)
2551{
2552 int iclogs_changed = 0;
2553
2554 dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2555
2556 xlog_state_activate_iclogs(log, &iclogs_changed);
2557 wake_up_all(&dirty_iclog->ic_force_wait);
2558
2559 if (iclogs_changed) {
2560 log->l_covered_state = xlog_covered_state(log->l_covered_state,
2561 iclogs_changed);
2562 }
2563}
2564
2565STATIC xfs_lsn_t
2566xlog_get_lowest_lsn(
2567 struct xlog *log)
2568{
2569 struct xlog_in_core *iclog = log->l_iclog;
2570 xfs_lsn_t lowest_lsn = 0, lsn;
2571
2572 do {
2573 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2574 iclog->ic_state == XLOG_STATE_DIRTY)
2575 continue;
2576
2577 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2578 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2579 lowest_lsn = lsn;
2580 } while ((iclog = iclog->ic_next) != log->l_iclog);
2581
2582 return lowest_lsn;
2583}
2584
2585/*
2586 * Completion of a iclog IO does not imply that a transaction has completed, as
2587 * transactions can be large enough to span many iclogs. We cannot change the
2588 * tail of the log half way through a transaction as this may be the only
2589 * transaction in the log and moving the tail to point to the middle of it
2590 * will prevent recovery from finding the start of the transaction. Hence we
2591 * should only update the last_sync_lsn if this iclog contains transaction
2592 * completion callbacks on it.
2593 *
2594 * We have to do this before we drop the icloglock to ensure we are the only one
2595 * that can update it.
2596 *
2597 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2598 * the reservation grant head pushing. This is due to the fact that the push
2599 * target is bound by the current last_sync_lsn value. Hence if we have a large
2600 * amount of log space bound up in this committing transaction then the
2601 * last_sync_lsn value may be the limiting factor preventing tail pushing from
2602 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2603 * should push the AIL to ensure the push target (and hence the grant head) is
2604 * no longer bound by the old log head location and can move forwards and make
2605 * progress again.
2606 */
2607static void
2608xlog_state_set_callback(
2609 struct xlog *log,
2610 struct xlog_in_core *iclog,
2611 xfs_lsn_t header_lsn)
2612{
2613 iclog->ic_state = XLOG_STATE_CALLBACK;
2614
2615 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2616 header_lsn) <= 0);
2617
2618 if (list_empty_careful(&iclog->ic_callbacks))
2619 return;
2620
2621 atomic64_set(&log->l_last_sync_lsn, header_lsn);
2622 xlog_grant_push_ail(log, 0);
2623}
2624
2625/*
2626 * Return true if we need to stop processing, false to continue to the next
2627 * iclog. The caller will need to run callbacks if the iclog is returned in the
2628 * XLOG_STATE_CALLBACK state.
2629 */
2630static bool
2631xlog_state_iodone_process_iclog(
2632 struct xlog *log,
2633 struct xlog_in_core *iclog,
2634 bool *ioerror)
2635{
2636 xfs_lsn_t lowest_lsn;
2637 xfs_lsn_t header_lsn;
2638
2639 switch (iclog->ic_state) {
2640 case XLOG_STATE_ACTIVE:
2641 case XLOG_STATE_DIRTY:
2642 /*
2643 * Skip all iclogs in the ACTIVE & DIRTY states:
2644 */
2645 return false;
2646 case XLOG_STATE_IOERROR:
2647 /*
2648 * Between marking a filesystem SHUTDOWN and stopping the log,
2649 * we do flush all iclogs to disk (if there wasn't a log I/O
2650 * error). So, we do want things to go smoothly in case of just
2651 * a SHUTDOWN w/o a LOG_IO_ERROR.
2652 */
2653 *ioerror = true;
2654 return false;
2655 case XLOG_STATE_DONE_SYNC:
2656 /*
2657 * Now that we have an iclog that is in the DONE_SYNC state, do
2658 * one more check here to see if we have chased our tail around.
2659 * If this is not the lowest lsn iclog, then we will leave it
2660 * for another completion to process.
2661 */
2662 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2663 lowest_lsn = xlog_get_lowest_lsn(log);
2664 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2665 return false;
2666 xlog_state_set_callback(log, iclog, header_lsn);
2667 return false;
2668 default:
2669 /*
2670 * Can only perform callbacks in order. Since this iclog is not
2671 * in the DONE_SYNC state, we skip the rest and just try to
2672 * clean up.
2673 */
2674 return true;
2675 }
2676}
2677
2678/*
2679 * Keep processing entries in the iclog callback list until we come around and
2680 * it is empty. We need to atomically see that the list is empty and change the
2681 * state to DIRTY so that we don't miss any more callbacks being added.
2682 *
2683 * This function is called with the icloglock held and returns with it held. We
2684 * drop it while running callbacks, however, as holding it over thousands of
2685 * callbacks is unnecessary and causes excessive contention if we do.
2686 */
2687static void
2688xlog_state_do_iclog_callbacks(
2689 struct xlog *log,
2690 struct xlog_in_core *iclog)
2691 __releases(&log->l_icloglock)
2692 __acquires(&log->l_icloglock)
2693{
2694 spin_unlock(&log->l_icloglock);
2695 spin_lock(&iclog->ic_callback_lock);
2696 while (!list_empty(&iclog->ic_callbacks)) {
2697 LIST_HEAD(tmp);
2698
2699 list_splice_init(&iclog->ic_callbacks, &tmp);
2700
2701 spin_unlock(&iclog->ic_callback_lock);
2702 xlog_cil_process_committed(&tmp);
2703 spin_lock(&iclog->ic_callback_lock);
2704 }
2705
2706 /*
2707 * Pick up the icloglock while still holding the callback lock so we
2708 * serialise against anyone trying to add more callbacks to this iclog
2709 * now we've finished processing.
2710 */
2711 spin_lock(&log->l_icloglock);
2712 spin_unlock(&iclog->ic_callback_lock);
2713}
2714
2715STATIC void
2716xlog_state_do_callback(
2717 struct xlog *log)
2718{
2719 struct xlog_in_core *iclog;
2720 struct xlog_in_core *first_iclog;
2721 bool cycled_icloglock;
2722 bool ioerror;
2723 int flushcnt = 0;
2724 int repeats = 0;
2725
2726 spin_lock(&log->l_icloglock);
2727 do {
2728 /*
2729 * Scan all iclogs starting with the one pointed to by the
2730 * log. Reset this starting point each time the log is
2731 * unlocked (during callbacks).
2732 *
2733 * Keep looping through iclogs until one full pass is made
2734 * without running any callbacks.
2735 */
2736 first_iclog = log->l_iclog;
2737 iclog = log->l_iclog;
2738 cycled_icloglock = false;
2739 ioerror = false;
2740 repeats++;
2741
2742 do {
2743 if (xlog_state_iodone_process_iclog(log, iclog,
2744 &ioerror))
2745 break;
2746
2747 if (iclog->ic_state != XLOG_STATE_CALLBACK &&
2748 iclog->ic_state != XLOG_STATE_IOERROR) {
2749 iclog = iclog->ic_next;
2750 continue;
2751 }
2752
2753 /*
2754 * Running callbacks will drop the icloglock which means
2755 * we'll have to run at least one more complete loop.
2756 */
2757 cycled_icloglock = true;
2758 xlog_state_do_iclog_callbacks(log, iclog);
2759 if (XLOG_FORCED_SHUTDOWN(log))
2760 wake_up_all(&iclog->ic_force_wait);
2761 else
2762 xlog_state_clean_iclog(log, iclog);
2763 iclog = iclog->ic_next;
2764 } while (first_iclog != iclog);
2765
2766 if (repeats > 5000) {
2767 flushcnt += repeats;
2768 repeats = 0;
2769 xfs_warn(log->l_mp,
2770 "%s: possible infinite loop (%d iterations)",
2771 __func__, flushcnt);
2772 }
2773 } while (!ioerror && cycled_icloglock);
2774
2775 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE ||
2776 log->l_iclog->ic_state == XLOG_STATE_IOERROR)
2777 wake_up_all(&log->l_flush_wait);
2778
2779 spin_unlock(&log->l_icloglock);
2780}
2781
2782
2783/*
2784 * Finish transitioning this iclog to the dirty state.
2785 *
2786 * Make sure that we completely execute this routine only when this is
2787 * the last call to the iclog. There is a good chance that iclog flushes,
2788 * when we reach the end of the physical log, get turned into 2 separate
2789 * calls to bwrite. Hence, one iclog flush could generate two calls to this
2790 * routine. By using the reference count bwritecnt, we guarantee that only
2791 * the second completion goes through.
2792 *
2793 * Callbacks could take time, so they are done outside the scope of the
2794 * global state machine log lock.
2795 */
2796STATIC void
2797xlog_state_done_syncing(
2798 struct xlog_in_core *iclog)
2799{
2800 struct xlog *log = iclog->ic_log;
2801
2802 spin_lock(&log->l_icloglock);
2803 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2804
2805 /*
2806 * If we got an error, either on the first buffer, or in the case of
2807 * split log writes, on the second, we shut down the file system and
2808 * no iclogs should ever be attempted to be written to disk again.
2809 */
2810 if (!XLOG_FORCED_SHUTDOWN(log)) {
2811 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2812 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2813 }
2814
2815 /*
2816 * Someone could be sleeping prior to writing out the next
2817 * iclog buffer, we wake them all, one will get to do the
2818 * I/O, the others get to wait for the result.
2819 */
2820 wake_up_all(&iclog->ic_write_wait);
2821 spin_unlock(&log->l_icloglock);
2822 xlog_state_do_callback(log);
2823}
2824
2825/*
2826 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2827 * sleep. We wait on the flush queue on the head iclog as that should be
2828 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2829 * we will wait here and all new writes will sleep until a sync completes.
2830 *
2831 * The in-core logs are used in a circular fashion. They are not used
2832 * out-of-order even when an iclog past the head is free.
2833 *
2834 * return:
2835 * * log_offset where xlog_write() can start writing into the in-core
2836 * log's data space.
2837 * * in-core log pointer to which xlog_write() should write.
2838 * * boolean indicating this is a continued write to an in-core log.
2839 * If this is the last write, then the in-core log's offset field
2840 * needs to be incremented, depending on the amount of data which
2841 * is copied.
2842 */
2843STATIC int
2844xlog_state_get_iclog_space(
2845 struct xlog *log,
2846 int len,
2847 struct xlog_in_core **iclogp,
2848 struct xlog_ticket *ticket,
2849 int *continued_write,
2850 int *logoffsetp)
2851{
2852 int log_offset;
2853 xlog_rec_header_t *head;
2854 xlog_in_core_t *iclog;
2855
2856restart:
2857 spin_lock(&log->l_icloglock);
2858 if (XLOG_FORCED_SHUTDOWN(log)) {
2859 spin_unlock(&log->l_icloglock);
2860 return -EIO;
2861 }
2862
2863 iclog = log->l_iclog;
2864 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2865 XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2866
2867 /* Wait for log writes to have flushed */
2868 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2869 goto restart;
2870 }
2871
2872 head = &iclog->ic_header;
2873
2874 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2875 log_offset = iclog->ic_offset;
2876
2877 /* On the 1st write to an iclog, figure out lsn. This works
2878 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2879 * committing to. If the offset is set, that's how many blocks
2880 * must be written.
2881 */
2882 if (log_offset == 0) {
2883 ticket->t_curr_res -= log->l_iclog_hsize;
2884 xlog_tic_add_region(ticket,
2885 log->l_iclog_hsize,
2886 XLOG_REG_TYPE_LRHEADER);
2887 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2888 head->h_lsn = cpu_to_be64(
2889 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2890 ASSERT(log->l_curr_block >= 0);
2891 }
2892
2893 /* If there is enough room to write everything, then do it. Otherwise,
2894 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2895 * bit is on, so this will get flushed out. Don't update ic_offset
2896 * until you know exactly how many bytes get copied. Therefore, wait
2897 * until later to update ic_offset.
2898 *
2899 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2900 * can fit into remaining data section.
2901 */
2902 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2903 int error = 0;
2904
2905 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2906
2907 /*
2908 * If we are the only one writing to this iclog, sync it to
2909 * disk. We need to do an atomic compare and decrement here to
2910 * avoid racing with concurrent atomic_dec_and_lock() calls in
2911 * xlog_state_release_iclog() when there is more than one
2912 * reference to the iclog.
2913 */
2914 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2915 error = xlog_state_release_iclog(log, iclog);
2916 spin_unlock(&log->l_icloglock);
2917 if (error)
2918 return error;
2919 goto restart;
2920 }
2921
2922 /* Do we have enough room to write the full amount in the remainder
2923 * of this iclog? Or must we continue a write on the next iclog and
2924 * mark this iclog as completely taken? In the case where we switch
2925 * iclogs (to mark it taken), this particular iclog will release/sync
2926 * to disk in xlog_write().
2927 */
2928 if (len <= iclog->ic_size - iclog->ic_offset) {
2929 *continued_write = 0;
2930 iclog->ic_offset += len;
2931 } else {
2932 *continued_write = 1;
2933 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2934 }
2935 *iclogp = iclog;
2936
2937 ASSERT(iclog->ic_offset <= iclog->ic_size);
2938 spin_unlock(&log->l_icloglock);
2939
2940 *logoffsetp = log_offset;
2941 return 0;
2942}
2943
2944/*
2945 * The first cnt-1 times a ticket goes through here we don't need to move the
2946 * grant write head because the permanent reservation has reserved cnt times the
2947 * unit amount. Release part of current permanent unit reservation and reset
2948 * current reservation to be one units worth. Also move grant reservation head
2949 * forward.
2950 */
2951void
2952xfs_log_ticket_regrant(
2953 struct xlog *log,
2954 struct xlog_ticket *ticket)
2955{
2956 trace_xfs_log_ticket_regrant(log, ticket);
2957
2958 if (ticket->t_cnt > 0)
2959 ticket->t_cnt--;
2960
2961 xlog_grant_sub_space(log, &log->l_reserve_head.grant,
2962 ticket->t_curr_res);
2963 xlog_grant_sub_space(log, &log->l_write_head.grant,
2964 ticket->t_curr_res);
2965 ticket->t_curr_res = ticket->t_unit_res;
2966 xlog_tic_reset_res(ticket);
2967
2968 trace_xfs_log_ticket_regrant_sub(log, ticket);
2969
2970 /* just return if we still have some of the pre-reserved space */
2971 if (!ticket->t_cnt) {
2972 xlog_grant_add_space(log, &log->l_reserve_head.grant,
2973 ticket->t_unit_res);
2974 trace_xfs_log_ticket_regrant_exit(log, ticket);
2975
2976 ticket->t_curr_res = ticket->t_unit_res;
2977 xlog_tic_reset_res(ticket);
2978 }
2979
2980 xfs_log_ticket_put(ticket);
2981}
2982
2983/*
2984 * Give back the space left from a reservation.
2985 *
2986 * All the information we need to make a correct determination of space left
2987 * is present. For non-permanent reservations, things are quite easy. The
2988 * count should have been decremented to zero. We only need to deal with the
2989 * space remaining in the current reservation part of the ticket. If the
2990 * ticket contains a permanent reservation, there may be left over space which
2991 * needs to be released. A count of N means that N-1 refills of the current
2992 * reservation can be done before we need to ask for more space. The first
2993 * one goes to fill up the first current reservation. Once we run out of
2994 * space, the count will stay at zero and the only space remaining will be
2995 * in the current reservation field.
2996 */
2997void
2998xfs_log_ticket_ungrant(
2999 struct xlog *log,
3000 struct xlog_ticket *ticket)
3001{
3002 int bytes;
3003
3004 trace_xfs_log_ticket_ungrant(log, ticket);
3005
3006 if (ticket->t_cnt > 0)
3007 ticket->t_cnt--;
3008
3009 trace_xfs_log_ticket_ungrant_sub(log, ticket);
3010
3011 /*
3012 * If this is a permanent reservation ticket, we may be able to free
3013 * up more space based on the remaining count.
3014 */
3015 bytes = ticket->t_curr_res;
3016 if (ticket->t_cnt > 0) {
3017 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3018 bytes += ticket->t_unit_res*ticket->t_cnt;
3019 }
3020
3021 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3022 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3023
3024 trace_xfs_log_ticket_ungrant_exit(log, ticket);
3025
3026 xfs_log_space_wake(log->l_mp);
3027 xfs_log_ticket_put(ticket);
3028}
3029
3030/*
3031 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3032 * the current iclog pointer to the next iclog in the ring.
3033 */
3034STATIC void
3035xlog_state_switch_iclogs(
3036 struct xlog *log,
3037 struct xlog_in_core *iclog,
3038 int eventual_size)
3039{
3040 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3041 assert_spin_locked(&log->l_icloglock);
3042
3043 if (!eventual_size)
3044 eventual_size = iclog->ic_offset;
3045 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3046 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3047 log->l_prev_block = log->l_curr_block;
3048 log->l_prev_cycle = log->l_curr_cycle;
3049
3050 /* roll log?: ic_offset changed later */
3051 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3052
3053 /* Round up to next log-sunit */
3054 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3055 log->l_mp->m_sb.sb_logsunit > 1) {
3056 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3057 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3058 }
3059
3060 if (log->l_curr_block >= log->l_logBBsize) {
3061 /*
3062 * Rewind the current block before the cycle is bumped to make
3063 * sure that the combined LSN never transiently moves forward
3064 * when the log wraps to the next cycle. This is to support the
3065 * unlocked sample of these fields from xlog_valid_lsn(). Most
3066 * other cases should acquire l_icloglock.
3067 */
3068 log->l_curr_block -= log->l_logBBsize;
3069 ASSERT(log->l_curr_block >= 0);
3070 smp_wmb();
3071 log->l_curr_cycle++;
3072 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3073 log->l_curr_cycle++;
3074 }
3075 ASSERT(iclog == log->l_iclog);
3076 log->l_iclog = iclog->ic_next;
3077}
3078
3079/*
3080 * Write out all data in the in-core log as of this exact moment in time.
3081 *
3082 * Data may be written to the in-core log during this call. However,
3083 * we don't guarantee this data will be written out. A change from past
3084 * implementation means this routine will *not* write out zero length LRs.
3085 *
3086 * Basically, we try and perform an intelligent scan of the in-core logs.
3087 * If we determine there is no flushable data, we just return. There is no
3088 * flushable data if:
3089 *
3090 * 1. the current iclog is active and has no data; the previous iclog
3091 * is in the active or dirty state.
3092 * 2. the current iclog is drity, and the previous iclog is in the
3093 * active or dirty state.
3094 *
3095 * We may sleep if:
3096 *
3097 * 1. the current iclog is not in the active nor dirty state.
3098 * 2. the current iclog dirty, and the previous iclog is not in the
3099 * active nor dirty state.
3100 * 3. the current iclog is active, and there is another thread writing
3101 * to this particular iclog.
3102 * 4. a) the current iclog is active and has no other writers
3103 * b) when we return from flushing out this iclog, it is still
3104 * not in the active nor dirty state.
3105 */
3106int
3107xfs_log_force(
3108 struct xfs_mount *mp,
3109 uint flags)
3110{
3111 struct xlog *log = mp->m_log;
3112 struct xlog_in_core *iclog;
3113 xfs_lsn_t lsn;
3114
3115 XFS_STATS_INC(mp, xs_log_force);
3116 trace_xfs_log_force(mp, 0, _RET_IP_);
3117
3118 xlog_cil_force(log);
3119
3120 spin_lock(&log->l_icloglock);
3121 iclog = log->l_iclog;
3122 if (iclog->ic_state == XLOG_STATE_IOERROR)
3123 goto out_error;
3124
3125 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3126 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3127 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3128 /*
3129 * If the head is dirty or (active and empty), then we need to
3130 * look at the previous iclog.
3131 *
3132 * If the previous iclog is active or dirty we are done. There
3133 * is nothing to sync out. Otherwise, we attach ourselves to the
3134 * previous iclog and go to sleep.
3135 */
3136 iclog = iclog->ic_prev;
3137 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3138 if (atomic_read(&iclog->ic_refcnt) == 0) {
3139 /*
3140 * We are the only one with access to this iclog.
3141 *
3142 * Flush it out now. There should be a roundoff of zero
3143 * to show that someone has already taken care of the
3144 * roundoff from the previous sync.
3145 */
3146 atomic_inc(&iclog->ic_refcnt);
3147 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3148 xlog_state_switch_iclogs(log, iclog, 0);
3149 if (xlog_state_release_iclog(log, iclog))
3150 goto out_error;
3151
3152 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3153 goto out_unlock;
3154 } else {
3155 /*
3156 * Someone else is writing to this iclog.
3157 *
3158 * Use its call to flush out the data. However, the
3159 * other thread may not force out this LR, so we mark
3160 * it WANT_SYNC.
3161 */
3162 xlog_state_switch_iclogs(log, iclog, 0);
3163 }
3164 } else {
3165 /*
3166 * If the head iclog is not active nor dirty, we just attach
3167 * ourselves to the head and go to sleep if necessary.
3168 */
3169 ;
3170 }
3171
3172 if (flags & XFS_LOG_SYNC)
3173 return xlog_wait_on_iclog(iclog);
3174out_unlock:
3175 spin_unlock(&log->l_icloglock);
3176 return 0;
3177out_error:
3178 spin_unlock(&log->l_icloglock);
3179 return -EIO;
3180}
3181
3182static int
3183__xfs_log_force_lsn(
3184 struct xfs_mount *mp,
3185 xfs_lsn_t lsn,
3186 uint flags,
3187 int *log_flushed,
3188 bool already_slept)
3189{
3190 struct xlog *log = mp->m_log;
3191 struct xlog_in_core *iclog;
3192
3193 spin_lock(&log->l_icloglock);
3194 iclog = log->l_iclog;
3195 if (iclog->ic_state == XLOG_STATE_IOERROR)
3196 goto out_error;
3197
3198 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3199 iclog = iclog->ic_next;
3200 if (iclog == log->l_iclog)
3201 goto out_unlock;
3202 }
3203
3204 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3205 /*
3206 * We sleep here if we haven't already slept (e.g. this is the
3207 * first time we've looked at the correct iclog buf) and the
3208 * buffer before us is going to be sync'ed. The reason for this
3209 * is that if we are doing sync transactions here, by waiting
3210 * for the previous I/O to complete, we can allow a few more
3211 * transactions into this iclog before we close it down.
3212 *
3213 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3214 * refcnt so we can release the log (which drops the ref count).
3215 * The state switch keeps new transaction commits from using
3216 * this buffer. When the current commits finish writing into
3217 * the buffer, the refcount will drop to zero and the buffer
3218 * will go out then.
3219 */
3220 if (!already_slept &&
3221 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3222 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3223 XFS_STATS_INC(mp, xs_log_force_sleep);
3224
3225 xlog_wait(&iclog->ic_prev->ic_write_wait,
3226 &log->l_icloglock);
3227 return -EAGAIN;
3228 }
3229 atomic_inc(&iclog->ic_refcnt);
3230 xlog_state_switch_iclogs(log, iclog, 0);
3231 if (xlog_state_release_iclog(log, iclog))
3232 goto out_error;
3233 if (log_flushed)
3234 *log_flushed = 1;
3235 }
3236
3237 if (flags & XFS_LOG_SYNC)
3238 return xlog_wait_on_iclog(iclog);
3239out_unlock:
3240 spin_unlock(&log->l_icloglock);
3241 return 0;
3242out_error:
3243 spin_unlock(&log->l_icloglock);
3244 return -EIO;
3245}
3246
3247/*
3248 * Force the in-core log to disk for a specific LSN.
3249 *
3250 * Find in-core log with lsn.
3251 * If it is in the DIRTY state, just return.
3252 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3253 * state and go to sleep or return.
3254 * If it is in any other state, go to sleep or return.
3255 *
3256 * Synchronous forces are implemented with a wait queue. All callers trying
3257 * to force a given lsn to disk must wait on the queue attached to the
3258 * specific in-core log. When given in-core log finally completes its write
3259 * to disk, that thread will wake up all threads waiting on the queue.
3260 */
3261int
3262xfs_log_force_lsn(
3263 struct xfs_mount *mp,
3264 xfs_lsn_t lsn,
3265 uint flags,
3266 int *log_flushed)
3267{
3268 int ret;
3269 ASSERT(lsn != 0);
3270
3271 XFS_STATS_INC(mp, xs_log_force);
3272 trace_xfs_log_force(mp, lsn, _RET_IP_);
3273
3274 lsn = xlog_cil_force_lsn(mp->m_log, lsn);
3275 if (lsn == NULLCOMMITLSN)
3276 return 0;
3277
3278 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
3279 if (ret == -EAGAIN)
3280 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
3281 return ret;
3282}
3283
3284/*
3285 * Free a used ticket when its refcount falls to zero.
3286 */
3287void
3288xfs_log_ticket_put(
3289 xlog_ticket_t *ticket)
3290{
3291 ASSERT(atomic_read(&ticket->t_ref) > 0);
3292 if (atomic_dec_and_test(&ticket->t_ref))
3293 kmem_cache_free(xfs_log_ticket_zone, ticket);
3294}
3295
3296xlog_ticket_t *
3297xfs_log_ticket_get(
3298 xlog_ticket_t *ticket)
3299{
3300 ASSERT(atomic_read(&ticket->t_ref) > 0);
3301 atomic_inc(&ticket->t_ref);
3302 return ticket;
3303}
3304
3305/*
3306 * Figure out the total log space unit (in bytes) that would be
3307 * required for a log ticket.
3308 */
3309int
3310xfs_log_calc_unit_res(
3311 struct xfs_mount *mp,
3312 int unit_bytes)
3313{
3314 struct xlog *log = mp->m_log;
3315 int iclog_space;
3316 uint num_headers;
3317
3318 /*
3319 * Permanent reservations have up to 'cnt'-1 active log operations
3320 * in the log. A unit in this case is the amount of space for one
3321 * of these log operations. Normal reservations have a cnt of 1
3322 * and their unit amount is the total amount of space required.
3323 *
3324 * The following lines of code account for non-transaction data
3325 * which occupy space in the on-disk log.
3326 *
3327 * Normal form of a transaction is:
3328 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3329 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3330 *
3331 * We need to account for all the leadup data and trailer data
3332 * around the transaction data.
3333 * And then we need to account for the worst case in terms of using
3334 * more space.
3335 * The worst case will happen if:
3336 * - the placement of the transaction happens to be such that the
3337 * roundoff is at its maximum
3338 * - the transaction data is synced before the commit record is synced
3339 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3340 * Therefore the commit record is in its own Log Record.
3341 * This can happen as the commit record is called with its
3342 * own region to xlog_write().
3343 * This then means that in the worst case, roundoff can happen for
3344 * the commit-rec as well.
3345 * The commit-rec is smaller than padding in this scenario and so it is
3346 * not added separately.
3347 */
3348
3349 /* for trans header */
3350 unit_bytes += sizeof(xlog_op_header_t);
3351 unit_bytes += sizeof(xfs_trans_header_t);
3352
3353 /* for start-rec */
3354 unit_bytes += sizeof(xlog_op_header_t);
3355
3356 /*
3357 * for LR headers - the space for data in an iclog is the size minus
3358 * the space used for the headers. If we use the iclog size, then we
3359 * undercalculate the number of headers required.
3360 *
3361 * Furthermore - the addition of op headers for split-recs might
3362 * increase the space required enough to require more log and op
3363 * headers, so take that into account too.
3364 *
3365 * IMPORTANT: This reservation makes the assumption that if this
3366 * transaction is the first in an iclog and hence has the LR headers
3367 * accounted to it, then the remaining space in the iclog is
3368 * exclusively for this transaction. i.e. if the transaction is larger
3369 * than the iclog, it will be the only thing in that iclog.
3370 * Fundamentally, this means we must pass the entire log vector to
3371 * xlog_write to guarantee this.
3372 */
3373 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3374 num_headers = howmany(unit_bytes, iclog_space);
3375
3376 /* for split-recs - ophdrs added when data split over LRs */
3377 unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3378
3379 /* add extra header reservations if we overrun */
3380 while (!num_headers ||
3381 howmany(unit_bytes, iclog_space) > num_headers) {
3382 unit_bytes += sizeof(xlog_op_header_t);
3383 num_headers++;
3384 }
3385 unit_bytes += log->l_iclog_hsize * num_headers;
3386
3387 /* for commit-rec LR header - note: padding will subsume the ophdr */
3388 unit_bytes += log->l_iclog_hsize;
3389
3390 /* for roundoff padding for transaction data and one for commit record */
3391 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
3392 /* log su roundoff */
3393 unit_bytes += 2 * mp->m_sb.sb_logsunit;
3394 } else {
3395 /* BB roundoff */
3396 unit_bytes += 2 * BBSIZE;
3397 }
3398
3399 return unit_bytes;
3400}
3401
3402/*
3403 * Allocate and initialise a new log ticket.
3404 */
3405struct xlog_ticket *
3406xlog_ticket_alloc(
3407 struct xlog *log,
3408 int unit_bytes,
3409 int cnt,
3410 char client,
3411 bool permanent)
3412{
3413 struct xlog_ticket *tic;
3414 int unit_res;
3415
3416 tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL);
3417
3418 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3419
3420 atomic_set(&tic->t_ref, 1);
3421 tic->t_task = current;
3422 INIT_LIST_HEAD(&tic->t_queue);
3423 tic->t_unit_res = unit_res;
3424 tic->t_curr_res = unit_res;
3425 tic->t_cnt = cnt;
3426 tic->t_ocnt = cnt;
3427 tic->t_tid = prandom_u32();
3428 tic->t_clientid = client;
3429 if (permanent)
3430 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3431
3432 xlog_tic_reset_res(tic);
3433
3434 return tic;
3435}
3436
3437#if defined(DEBUG)
3438/*
3439 * Make sure that the destination ptr is within the valid data region of
3440 * one of the iclogs. This uses backup pointers stored in a different
3441 * part of the log in case we trash the log structure.
3442 */
3443STATIC void
3444xlog_verify_dest_ptr(
3445 struct xlog *log,
3446 void *ptr)
3447{
3448 int i;
3449 int good_ptr = 0;
3450
3451 for (i = 0; i < log->l_iclog_bufs; i++) {
3452 if (ptr >= log->l_iclog_bak[i] &&
3453 ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3454 good_ptr++;
3455 }
3456
3457 if (!good_ptr)
3458 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3459}
3460
3461/*
3462 * Check to make sure the grant write head didn't just over lap the tail. If
3463 * the cycles are the same, we can't be overlapping. Otherwise, make sure that
3464 * the cycles differ by exactly one and check the byte count.
3465 *
3466 * This check is run unlocked, so can give false positives. Rather than assert
3467 * on failures, use a warn-once flag and a panic tag to allow the admin to
3468 * determine if they want to panic the machine when such an error occurs. For
3469 * debug kernels this will have the same effect as using an assert but, unlinke
3470 * an assert, it can be turned off at runtime.
3471 */
3472STATIC void
3473xlog_verify_grant_tail(
3474 struct xlog *log)
3475{
3476 int tail_cycle, tail_blocks;
3477 int cycle, space;
3478
3479 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3480 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3481 if (tail_cycle != cycle) {
3482 if (cycle - 1 != tail_cycle &&
3483 !(log->l_flags & XLOG_TAIL_WARN)) {
3484 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3485 "%s: cycle - 1 != tail_cycle", __func__);
3486 log->l_flags |= XLOG_TAIL_WARN;
3487 }
3488
3489 if (space > BBTOB(tail_blocks) &&
3490 !(log->l_flags & XLOG_TAIL_WARN)) {
3491 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3492 "%s: space > BBTOB(tail_blocks)", __func__);
3493 log->l_flags |= XLOG_TAIL_WARN;
3494 }
3495 }
3496}
3497
3498/* check if it will fit */
3499STATIC void
3500xlog_verify_tail_lsn(
3501 struct xlog *log,
3502 struct xlog_in_core *iclog,
3503 xfs_lsn_t tail_lsn)
3504{
3505 int blocks;
3506
3507 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3508 blocks =
3509 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3510 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3511 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3512 } else {
3513 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3514
3515 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3516 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3517
3518 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3519 if (blocks < BTOBB(iclog->ic_offset) + 1)
3520 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3521 }
3522}
3523
3524/*
3525 * Perform a number of checks on the iclog before writing to disk.
3526 *
3527 * 1. Make sure the iclogs are still circular
3528 * 2. Make sure we have a good magic number
3529 * 3. Make sure we don't have magic numbers in the data
3530 * 4. Check fields of each log operation header for:
3531 * A. Valid client identifier
3532 * B. tid ptr value falls in valid ptr space (user space code)
3533 * C. Length in log record header is correct according to the
3534 * individual operation headers within record.
3535 * 5. When a bwrite will occur within 5 blocks of the front of the physical
3536 * log, check the preceding blocks of the physical log to make sure all
3537 * the cycle numbers agree with the current cycle number.
3538 */
3539STATIC void
3540xlog_verify_iclog(
3541 struct xlog *log,
3542 struct xlog_in_core *iclog,
3543 int count)
3544{
3545 xlog_op_header_t *ophead;
3546 xlog_in_core_t *icptr;
3547 xlog_in_core_2_t *xhdr;
3548 void *base_ptr, *ptr, *p;
3549 ptrdiff_t field_offset;
3550 uint8_t clientid;
3551 int len, i, j, k, op_len;
3552 int idx;
3553
3554 /* check validity of iclog pointers */
3555 spin_lock(&log->l_icloglock);
3556 icptr = log->l_iclog;
3557 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3558 ASSERT(icptr);
3559
3560 if (icptr != log->l_iclog)
3561 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3562 spin_unlock(&log->l_icloglock);
3563
3564 /* check log magic numbers */
3565 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3566 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3567
3568 base_ptr = ptr = &iclog->ic_header;
3569 p = &iclog->ic_header;
3570 for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
3571 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3572 xfs_emerg(log->l_mp, "%s: unexpected magic num",
3573 __func__);
3574 }
3575
3576 /* check fields */
3577 len = be32_to_cpu(iclog->ic_header.h_num_logops);
3578 base_ptr = ptr = iclog->ic_datap;
3579 ophead = ptr;
3580 xhdr = iclog->ic_data;
3581 for (i = 0; i < len; i++) {
3582 ophead = ptr;
3583
3584 /* clientid is only 1 byte */
3585 p = &ophead->oh_clientid;
3586 field_offset = p - base_ptr;
3587 if (field_offset & 0x1ff) {
3588 clientid = ophead->oh_clientid;
3589 } else {
3590 idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
3591 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3592 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3593 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3594 clientid = xlog_get_client_id(
3595 xhdr[j].hic_xheader.xh_cycle_data[k]);
3596 } else {
3597 clientid = xlog_get_client_id(
3598 iclog->ic_header.h_cycle_data[idx]);
3599 }
3600 }
3601 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3602 xfs_warn(log->l_mp,
3603 "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
3604 __func__, clientid, ophead,
3605 (unsigned long)field_offset);
3606
3607 /* check length */
3608 p = &ophead->oh_len;
3609 field_offset = p - base_ptr;
3610 if (field_offset & 0x1ff) {
3611 op_len = be32_to_cpu(ophead->oh_len);
3612 } else {
3613 idx = BTOBBT((uintptr_t)&ophead->oh_len -
3614 (uintptr_t)iclog->ic_datap);
3615 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3616 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3617 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3618 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3619 } else {
3620 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3621 }
3622 }
3623 ptr += sizeof(xlog_op_header_t) + op_len;
3624 }
3625}
3626#endif
3627
3628/*
3629 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
3630 */
3631STATIC int
3632xlog_state_ioerror(
3633 struct xlog *log)
3634{
3635 xlog_in_core_t *iclog, *ic;
3636
3637 iclog = log->l_iclog;
3638 if (iclog->ic_state != XLOG_STATE_IOERROR) {
3639 /*
3640 * Mark all the incore logs IOERROR.
3641 * From now on, no log flushes will result.
3642 */
3643 ic = iclog;
3644 do {
3645 ic->ic_state = XLOG_STATE_IOERROR;
3646 ic = ic->ic_next;
3647 } while (ic != iclog);
3648 return 0;
3649 }
3650 /*
3651 * Return non-zero, if state transition has already happened.
3652 */
3653 return 1;
3654}
3655
3656/*
3657 * This is called from xfs_force_shutdown, when we're forcibly
3658 * shutting down the filesystem, typically because of an IO error.
3659 * Our main objectives here are to make sure that:
3660 * a. if !logerror, flush the logs to disk. Anything modified
3661 * after this is ignored.
3662 * b. the filesystem gets marked 'SHUTDOWN' for all interested
3663 * parties to find out, 'atomically'.
3664 * c. those who're sleeping on log reservations, pinned objects and
3665 * other resources get woken up, and be told the bad news.
3666 * d. nothing new gets queued up after (b) and (c) are done.
3667 *
3668 * Note: for the !logerror case we need to flush the regions held in memory out
3669 * to disk first. This needs to be done before the log is marked as shutdown,
3670 * otherwise the iclog writes will fail.
3671 */
3672int
3673xfs_log_force_umount(
3674 struct xfs_mount *mp,
3675 int logerror)
3676{
3677 struct xlog *log;
3678 int retval;
3679
3680 log = mp->m_log;
3681
3682 /*
3683 * If this happens during log recovery, don't worry about
3684 * locking; the log isn't open for business yet.
3685 */
3686 if (!log ||
3687 log->l_flags & XLOG_ACTIVE_RECOVERY) {
3688 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3689 if (mp->m_sb_bp)
3690 mp->m_sb_bp->b_flags |= XBF_DONE;
3691 return 0;
3692 }
3693
3694 /*
3695 * Somebody could've already done the hard work for us.
3696 * No need to get locks for this.
3697 */
3698 if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
3699 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3700 return 1;
3701 }
3702
3703 /*
3704 * Flush all the completed transactions to disk before marking the log
3705 * being shut down. We need to do it in this order to ensure that
3706 * completed operations are safely on disk before we shut down, and that
3707 * we don't have to issue any buffer IO after the shutdown flags are set
3708 * to guarantee this.
3709 */
3710 if (!logerror)
3711 xfs_log_force(mp, XFS_LOG_SYNC);
3712
3713 /*
3714 * mark the filesystem and the as in a shutdown state and wake
3715 * everybody up to tell them the bad news.
3716 */
3717 spin_lock(&log->l_icloglock);
3718 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3719 if (mp->m_sb_bp)
3720 mp->m_sb_bp->b_flags |= XBF_DONE;
3721
3722 /*
3723 * Mark the log and the iclogs with IO error flags to prevent any
3724 * further log IO from being issued or completed.
3725 */
3726 log->l_flags |= XLOG_IO_ERROR;
3727 retval = xlog_state_ioerror(log);
3728 spin_unlock(&log->l_icloglock);
3729
3730 /*
3731 * We don't want anybody waiting for log reservations after this. That
3732 * means we have to wake up everybody queued up on reserveq as well as
3733 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
3734 * we don't enqueue anything once the SHUTDOWN flag is set, and this
3735 * action is protected by the grant locks.
3736 */
3737 xlog_grant_head_wake_all(&log->l_reserve_head);
3738 xlog_grant_head_wake_all(&log->l_write_head);
3739
3740 /*
3741 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
3742 * as if the log writes were completed. The abort handling in the log
3743 * item committed callback functions will do this again under lock to
3744 * avoid races.
3745 */
3746 spin_lock(&log->l_cilp->xc_push_lock);
3747 wake_up_all(&log->l_cilp->xc_commit_wait);
3748 spin_unlock(&log->l_cilp->xc_push_lock);
3749 xlog_state_do_callback(log);
3750
3751 /* return non-zero if log IOERROR transition had already happened */
3752 return retval;
3753}
3754
3755STATIC int
3756xlog_iclogs_empty(
3757 struct xlog *log)
3758{
3759 xlog_in_core_t *iclog;
3760
3761 iclog = log->l_iclog;
3762 do {
3763 /* endianness does not matter here, zero is zero in
3764 * any language.
3765 */
3766 if (iclog->ic_header.h_num_logops)
3767 return 0;
3768 iclog = iclog->ic_next;
3769 } while (iclog != log->l_iclog);
3770 return 1;
3771}
3772
3773/*
3774 * Verify that an LSN stamped into a piece of metadata is valid. This is
3775 * intended for use in read verifiers on v5 superblocks.
3776 */
3777bool
3778xfs_log_check_lsn(
3779 struct xfs_mount *mp,
3780 xfs_lsn_t lsn)
3781{
3782 struct xlog *log = mp->m_log;
3783 bool valid;
3784
3785 /*
3786 * norecovery mode skips mount-time log processing and unconditionally
3787 * resets the in-core LSN. We can't validate in this mode, but
3788 * modifications are not allowed anyways so just return true.
3789 */
3790 if (mp->m_flags & XFS_MOUNT_NORECOVERY)
3791 return true;
3792
3793 /*
3794 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
3795 * handled by recovery and thus safe to ignore here.
3796 */
3797 if (lsn == NULLCOMMITLSN)
3798 return true;
3799
3800 valid = xlog_valid_lsn(mp->m_log, lsn);
3801
3802 /* warn the user about what's gone wrong before verifier failure */
3803 if (!valid) {
3804 spin_lock(&log->l_icloglock);
3805 xfs_warn(mp,
3806"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
3807"Please unmount and run xfs_repair (>= v4.3) to resolve.",
3808 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
3809 log->l_curr_cycle, log->l_curr_block);
3810 spin_unlock(&log->l_icloglock);
3811 }
3812
3813 return valid;
3814}
3815
3816bool
3817xfs_log_in_recovery(
3818 struct xfs_mount *mp)
3819{
3820 struct xlog *log = mp->m_log;
3821
3822 return log->l_flags & XLOG_ACTIVE_RECOVERY;
3823}