/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	xfs_log_item_t		*lip)
{
	xfs_log_item_t		*prev_lip;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail		*ailp,
	xfs_log_item_t		*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us.  However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by a lip of 1), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
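
/*
 * Illustrative sketch (not a real caller) of the traversal pattern these
 * cursor helpers implement; xfsaild_push() below is the canonical user.
 * Hold the AIL lock, start a cursor at the requested lsn, walk forwards,
 * then tear the cursor down when finished:
 *
 *	spin_lock(&ailp->ail_lock);
 *	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	     lip != NULL;
 *	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
 *		process(lip);	(a placeholder - it may drop and retake
 *				 ail_lock; cursor invalidation keeps the
 *				 walk safe across the relock)
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->ail_lock);
 */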

/*
 * Invalidate any cursor that is pointing to this item.  This is called when an
 * item is removed from the AIL.  Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object.  We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL.  Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with an LSN equal to @lsn, the cursor is set to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN.  We splice to the
 * tail of the given LSN to maintain insert order for push traversals.  The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}

/*
 * Delete the given item from the AIL, invalidating any cursors that point
 * to it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	xfs_log_item_t		*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned.  This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_mount, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
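
/*
 * For testing, XFS_ERRTAG_LOG_ITEM_PIN can be armed from userspace, assuming
 * a kernel built with error injection and the standard errortag sysfs knobs
 * (the "log_item_pin" spelling is assumed to match the tag's sysfs name):
 *
 *	echo 1 > /sys/fs/xfs/<dev>/errortag/log_item_pin
 *
 * Pushes then report XFS_ITEM_PINNED at the configured frequency, which
 * exercises the log force and backoff paths in xfsaild_push() below.
 */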

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	xfs_mount_t		*mp = ailp->ail_mount;
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force the log first and wait for it
	 * before pushing again.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xfs_log_force(mp, XFS_LOG_SYNC);
	}

	spin_lock(&ailp->ail_lock);

	/* barrier matches the ail_target update in xfs_ail_push() */
	smp_rmb();
	target = ailp->ail_target;
	ailp->ail_target_prev = target;

	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip) {
		/*
		 * If the AIL is empty or our push has reached the end we are
		 * done now.
		 */
		xfs_trans_ail_cursor_done(&cur);
		spin_unlock(&ailp->ail_lock);
		goto out_done;
	}

	XFS_STATS_INC(mp, xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock.  We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed.  The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done.  i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress.  "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL.  This prevents us from
		 * spinning on the same items, and if they are pinned it will
		 * allow the restart to issue a log force to unpin the stuck
		 * items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}
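
/*
 * Worked example of the backoff thresholds above, with illustrative numbers:
 * if a scan pushed count = 50 items of which stuck = 40 and flushing = 8,
 * then ((40 + 8) * 100) / 50 = 96 > 90, so we back off with tout = 20ms and
 * restart from the start of the AIL.  With only 10 of 50 items stuck the
 * ratio is 20, and the push continues after a short 10ms sleep.
 */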

static int
xfsaild(
	void		*data)
{
	struct xfs_ail	*ailp = data;
	long		tout = 0;	/* milliseconds */

	current->flags |= PF_MEMALLOC;
	set_freezable();

	while (1) {
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Check kthread_should_stop() after we set the task state
		 * to guarantee that we either see the stop bit and exit or
		 * the task state is reset to runnable such that it's not
		 * scheduled out indefinitely and detects the stop bit at
		 * next iteration.
		 *
		 * A memory barrier is included in above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		spin_lock(&ailp->ail_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update.  We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an ail_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the ail_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->ail_target == ailp->ail_target_prev) {
			spin_unlock(&ailp->ail_lock);
			freezable_schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			freezable_schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this
 * by trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild thread, which means the
 * caller needs to handle waiting on the async flush for space to become
 * available.  We don't want to interrupt any push that is in progress, hence
 * we only update the push target appropriately and wake the thread.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	xfs_log_item_t		*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->ail_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before the
	 * thread decides to go back to sleep; these barriers pair with the
	 * smp_rmb() calls in xfsaild() and xfsaild_push() that read
	 * ail_target.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->ail_task);
}
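
/*
 * Hypothetical caller sketch: log space reservation code computes how far
 * the tail must move to free enough log space and nudges the AIL (in this
 * codebase xlog_grant_push_ail() plays this role):
 *
 *	xfs_lsn_t	threshold_lsn = some_computed_tail_target;
 *
 *	xfs_ail_push(mp->m_ail, threshold_lsn);
 *	(returns immediately; xfsaild does the pushing asynchronously)
 */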

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		ailp->ail_target = lip->li_lsn;
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}
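
/*
 * Sketch of a typical (hypothetical) caller: the unmount/quiesce path needs
 * all metadata written back before the log is torn down, so it empties the
 * AIL synchronously:
 *
 *	xfs_ail_push_all_sync(mp->m_ail);
 *	(on return the AIL is empty and the log tail has caught up)
 */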

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL.  If an item is not in the AIL, it
 * will be added.  Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL.  If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single acquisition of the AIL lock.  Once we hold the lock, we
 * still need to check each log item's LSN to confirm it actually needs to be
 * moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the
 * temporary list into the correct position in the AIL.  This avoids needing
 * to do an insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	xfs_log_item_t		*mlip;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
			xlog_assign_tail_lsn_locked(ailp->ail_mount);
		spin_unlock(&ailp->ail_lock);

		xfs_log_space_wake(ailp->ail_mount);
	} else {
		spin_unlock(&ailp->ail_lock);
	}
}
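
/*
 * Sketch of a typical single-item caller (the xfs_trans_ail_update() wrapper
 * in xfs_trans_priv.h follows this pattern): take the AIL lock, then let the
 * bulk update reposition the item and drop the lock on our behalf:
 *
 *	spin_lock(&ailp->ail_lock);
 *	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 *	(ail_lock has been released when this returns)
 */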

bool
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	xfs_clear_li_failed(lip);
	lip->li_flags &= ~XFS_LI_IN_AIL;
	lip->li_lsn = 0;

	return mlip == lip;
}

/**
 * Remove a log item from the AIL
 *
 * @xfs_trans_ail_delete takes a log item that needs to be removed from the
 * AIL.  The caller is already holding the AIL lock, and has done all the
 * checks necessary to ensure the item passed in is ready for deletion.  This
 * includes checking that the item is in the AIL.
 *
 * To remove the log item, unlink it from the AIL, clear the IN_AIL flag from
 * the item and reset the item's lsn to 0.  If we remove the first item in
 * the AIL, update the log tail to match the new minimum LSN in the AIL.
 *
 * This function does not drop the AIL lock until the item is removed from
 * the AIL to minimise the amount of lock traffic on the AIL.  This does not
 * greatly increase the AIL hold time, but does significantly reduce the
 * amount of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	int			shutdown_type) __releases(ailp->ail_lock)
{
	struct xfs_mount	*mp = ailp->ail_mount;
	bool			mlip_changed;

	if (!(lip->li_flags & XFS_LI_IN_AIL)) {
		spin_unlock(&ailp->ail_lock);
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xfs_force_shutdown(mp, shutdown_type);
		}
		return;
	}

	mlip_changed = xfs_ail_delete_one(ailp, lip);
	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(mp))
			xlog_assign_tail_lsn_locked(mp);
		if (list_empty(&ailp->ail_head))
			wake_up_all(&ailp->ail_empty);
	}

	spin_unlock(&ailp->ail_lock);
	if (mlip_changed)
		xfs_log_space_wake(ailp->ail_mount);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_mount = mp;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
			ailp->ail_mount->m_fsname);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kmem_free(ailp);
}