// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
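
/*
 * For reference, the ordering predicate used by the checks above amounts to a
 * two-level compare: cycle number first, then block number. A minimal sketch
 * of XFS_LSN_CMP() in those terms (illustrative only; the real helper lives
 * in the log headers and returns +/-999 sentinels rather than +/-1):
 *
 *	static int lsn_cmp_sketch(xfs_lsn_t a, xfs_lsn_t b)
 *	{
 *		if (CYCLE_LSN(a) != CYCLE_LSN(b))
 *			return CYCLE_LSN(a) < CYCLE_LSN(b) ? -1 : 1;
 *		if (BLOCK_LSN(a) != BLOCK_LSN(b))
 *			return BLOCK_LSN(a) < BLOCK_LSN(b) ? -1 : 1;
 *		return 0;
 *	}
 */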

/*
 * Return a pointer to the last item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL. If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by a lip of 1), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal. Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item. If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals. This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided. If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now. Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice. Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go. If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}
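
/*
 * Positioning example for the splice above, purely illustrative: if the AIL
 * holds items with LSNs { 10, 20, 20, 30 } and a list is spliced in at
 * lsn == 20, the cursor-less lookup returns the second "20" item, so the new
 * items land between it and "30". Items that share an LSN therefore stay in
 * insertion order, which is what the push traversal relies on.
 */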

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the
 * buffer may be the failed log items. Hence if we clear the log item failed
 * state before queuing the buffer for IO we can release all active references
 * to the buffer and free it, leading to use-after-free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items
 * which order we process them in - the buffer is locked, and we own the
 * buffer list so nothing on them is going to change while we are performing
 * this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & (_XBF_INODES | _XBF_DQUOTS))
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_log->l_mp, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
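
/*
 * Summary of the push return codes handled by the push loop below:
 *
 *	XFS_ITEM_SUCCESS	the item was queued for writeback
 *	XFS_ITEM_PINNED		the item is pinned in memory and the CIL
 *				needs to be flushed to unpin it
 *	XFS_ITEM_LOCKED		the item's lock is contended; skip it and
 *				retry on a later pass
 *	XFS_ITEM_FLUSHING	the item is already under IO; it will leave
 *				the AIL when that IO completes
 */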

/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * at least 25% of the log space free. If the log free space already meets this
 * threshold, this function returns the lowest LSN in the AIL to slowly keep
 * writeback ticking over and the tail of the log moving forward.
 */
static xfs_lsn_t
xfs_ail_calc_push_target(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		target_lsn;
	xfs_lsn_t		max_lsn;
	xfs_lsn_t		min_lsn;
	int32_t			free_bytes;
	uint32_t		target_block;
	uint32_t		target_cycle;

	lockdep_assert_held(&ailp->ail_lock);

	lip = xfs_ail_max(ailp);
	if (!lip)
		return NULLCOMMITLSN;

	max_lsn = lip->li_lsn;
	min_lsn = __xfs_ail_min_lsn(ailp);

	/*
	 * If we are supposed to push all the items in the AIL, we want to push
	 * to the current head. We then clear the push flag so that we don't
	 * keep pushing newly queued items beyond where the push all command
	 * was run. If the push waiter wants to empty the AIL, it should queue
	 * itself on the ail_empty wait queue.
	 */
	if (test_and_clear_bit(XFS_AIL_OPSTATE_PUSH_ALL, &ailp->ail_opstate))
		return max_lsn;

	/* If someone wants the AIL empty, keep pushing everything we have. */
	if (waitqueue_active(&ailp->ail_empty))
		return max_lsn;

	/*
	 * Background pushing - attempt to keep 25% of the log free and if we
	 * have that much free retain the existing target.
	 */
	free_bytes = log->l_logsize - xlog_lsn_sub(log, max_lsn, min_lsn);
	if (free_bytes >= log->l_logsize >> 2)
		return ailp->ail_target;

	target_cycle = CYCLE_LSN(min_lsn);
	target_block = BLOCK_LSN(min_lsn) + (log->l_logBBsize >> 2);
	if (target_block >= log->l_logBBsize) {
		target_block -= log->l_logBBsize;
		target_cycle += 1;
	}
	target_lsn = xlog_assign_lsn(target_cycle, target_block);

	/* Cap the target to the highest LSN known to be in the AIL. */
	if (XFS_LSN_CMP(target_lsn, max_lsn) > 0)
		return max_lsn;

	/* If the existing target is higher than the new target, keep it. */
	if (XFS_LSN_CMP(ailp->ail_target, target_lsn) >= 0)
		return ailp->ail_target;
	return target_lsn;
}
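
/*
 * Worked example with made-up numbers: on a 4 MiB log, l_logBBsize is 8192
 * basic blocks, so the increment above is 8192 >> 2 == 2048 BBs. With
 * min_lsn at { cycle 5, block 7000 }, the raw target block is 9048, which
 * wraps past the end of the log and becomes { cycle 6, block 856 } before
 * the cap against max_lsn is applied.
 */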

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space runs out.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(ailp->ail_log);
	}

	spin_lock(&ailp->ail_lock);
	WRITE_ONCE(ailp->ail_target, xfs_ail_calc_push_target(ailp));
	if (ailp->ail_target == NULLCOMMITLSN)
		goto out_done;

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done_cursor;

	XFS_STATS_INC(mp, xs_push_ail);

	ASSERT(ailp->ail_target != NULLCOMMITLSN);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
		int	lock_result;

		if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
			goto next_item;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock. We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed. The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

next_item:
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		if (lip->li_lsn != lsn && count > 1000)
			break;
		lsn = lip->li_lsn;
	}

out_done_cursor:
	xfs_trans_ail_cursor_done(&cur);
out_done:
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, ailp->ail_target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Back off a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned will
		 * allow the restart to issue a log force to unpin the stuck
		 * items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 0;
	}

	return tout;
}

static int
xfsaild(
	void			*data)
{
	struct xfs_ail		*ailp = data;
	long			tout = 0;	/* milliseconds */
	unsigned int		noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		/*
		 * Long waits of 50ms or more occur when we've run out of items
		 * to push, so we only want uninterruptible state if we're
		 * actually blocked on something.
		 */
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE|TASK_FREEZABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in the above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       xlog_is_shutdown(ailp->ail_log));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		/* Idle if the AIL is empty. */
		spin_lock(&ailp->ail_lock);
		if (!xfs_ail_min(ailp) && list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while (xfs_ail_max(ailp) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}

void
__xfs_ail_assign_tail_lsn(
	struct xfs_ail		*ailp)
{
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&ailp->ail_lock);

	if (xlog_is_shutdown(log))
		return;

	tail_lsn = __xfs_ail_min_lsn(ailp);
	if (!tail_lsn)
		tail_lsn = ailp->ail_head_lsn;

	WRITE_ONCE(log->l_tail_space,
		   xlog_lsn_sub(log, ailp->ail_head_lsn, tail_lsn));
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
}
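
/*
 * Illustrative numbers for the space calculation above: with an 8192 BB
 * (4 MiB) log, ail_head_lsn at { cycle 8, block 100 } and the tail at
 * { cycle 7, block 7000 }, the pinned span wraps the end of the log, so
 * xlog_lsn_sub() yields (8192 - 7000 + 100) == 1292 BBs, i.e.
 * 1292 * 512 == 661504 bytes of l_tail_space.
 */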

/*
 * Callers should pass the original tail lsn so that we can detect if the tail
 * has moved as a result of the operation that was performed. If the caller
 * needs to force a tail space update, it should pass NULLCOMMITLSN to bypass
 * the "did the tail LSN change?" checks. If the caller wants to avoid a tail
 * update (e.g. it knows the tail did not change) it should pass an @old_lsn of
 * 0.
 */
void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xlog		*log = ailp->ail_log;

	/* If the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	__xfs_ail_assign_tail_lsn(ailp);
	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(log->l_mp);
}

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array, and as such should not be called with the AIL
 * lock held. As a result, once we have the AIL lock, we need to check each log
 * item LSN to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add_tail(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	/*
	 * If this is the first insert, wake up the push daemon so it can
	 * actively scan for items to push. We also need to do a log tail
	 * LSN update to ensure that it is correctly tracked by the log, so
	 * set the tail_lsn to NULLCOMMITLSN so that xfs_ail_update_finish()
	 * will see that the tail lsn has changed and will update the tail
	 * appropriately.
	 */
	if (!mlip) {
		wake_up_process(ailp->ail_task);
		tail_lsn = NULLCOMMITLSN;
	}

	xfs_ail_update_finish(ailp, tail_lsn);
}

/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}

/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}
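
/*
 * Caller pattern, sketched: xfs_trans_ail_delete() below is the canonical
 * user. Take the AIL lock, delete the item, then let xfs_ail_update_finish()
 * decide whether the log tail moved (it also drops the lock):
 *
 *	spin_lock(&ailp->ail_lock);
 *	tail_lsn = xfs_ail_delete_one(ailp, lip);
 *	xfs_ail_update_finish(ailp, tail_lsn);
 */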

void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !xlog_is_shutdown(log)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xlog_force_shutdown(log, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kzalloc(sizeof(struct xfs_ail),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_log = mp->m_log;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				mp->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kfree(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kfree(ailp);
}