/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}


#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLF_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}
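
/*
 * Worked example (illustration added for clarity, not in the original
 * source): with XFS_BLF_SHIFT of 7 (128 byte chunks) and NBWORD of 32,
 * byte 300 of the buffer lives in chunk 300 >> 7 = 2, which is tracked
 * by word 2 >> BIT_TO_WORD_SHIFT = 0 of blf_data_map, bit 2 & 31 = 2.
 * The debug loop above performs exactly this decomposition for every
 * byte in [first, last] and asserts that its chunk bit is set.
 */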

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	uint			nbytes;

	if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
		return;

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(XFS_BUF_COUNT(bp) > 0);
	ASSERT(bp->b_addr != NULL);
	orig = bip->bli_orig;
	buffer = bp->b_addr;
	for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
			xfs_emerg(bp->b_mount,
				"%s: bip %p buffer %p orig %p index %d",
				__func__, bip, bp, orig, x);
			ASSERT(0);
		}
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		return 1;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
	nvecs = 1;
	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				bip->bli_format.blf_map_size, 0);
	ASSERT(last_bit != -1);
	nvecs++;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			last_bit = -1;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}
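
/*
 * Worked example (illustration added for clarity, not in the original
 * source): with 128 byte chunks, logging bytes 0-511 and 1024-1535 of a
 * 4k buffer sets bits 0-3 and 8-11 in blf_data_map.  The loop above sees
 * one jump in the bit run (3 -> 8), so xfs_buf_item_size() returns 3
 * vectors: the format structure plus one iovec per contiguous run.
 */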

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	uint			nvecs;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;
	uint			buffer_offset;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * The size of the base structure is the size of the
	 * declared structure plus the space for the extra words
	 * of the bitmap.  We subtract one from the map size, because
	 * the first element of the bitmap is accounted for in the
	 * size of the base structure.
	 */
	base_size =
		(uint)(sizeof(xfs_buf_log_format_t) +
		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
	vecp->i_addr = &bip->bli_format;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.  We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		bip->bli_format.blf_size = nvecs;
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				 bip->bli_format.blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
			   (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			/*
			 * You would think we need to bump nvecs here too,
			 * but we do not: this number is used by recovery,
			 * and it gets confused by the boundary split here.
			 *
			 *	nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
	xfs_buf_item_log_check(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory.  This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL.  xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	}
}

/*
 * This is called to attempt to lock the buffer associated with this
 * buf log item.  Don't sleep on the buffer lock.  If we can't get
 * the lock right away, return 0.  If we can get the lock, take a
 * reference to the buffer.  If this is a delayed write buffer that
 * needs AIL help to be written back, invoke the pushbuf routine
 * rather than the normal success path.
 */
STATIC uint
xfs_buf_item_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	/* take a reference to the buffer. */
	xfs_buf_hold(bp);

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	trace_xfs_buf_item_trylock(bip);
	if (XFS_BUF_ISDELAYWRITE(bp))
		return XFS_ITEM_PUSHBUF;
	return XFS_ITEM_SUCCESS;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.  If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.
	 */
	if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
			     bip->bli_format.blf_map_size))
		xfs_buf_item_relse(bp);
	else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

/*
 * The buffer is locked, but is not a delayed write buffer.  This happens
 * if we race with IO completion and hence we don't want to try to write it
 * again.  Just release the buffer.
 */
STATIC void
xfs_buf_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_push(bip);

	xfs_buf_relse(bp);
}

/*
 * The buffer is locked and is a delayed write buffer.  Promote the buffer
 * in the delayed write queue as the caller knows that they must invoke
 * the xfsbufd to get this buffer written.  We have to unlock the buffer
 * to allow the xfsbufd to write it, too.
 */
STATIC bool
xfs_buf_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_pushbuf(bip);

	xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
	return true;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_trylock	= xfs_buf_item_trylock,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_pushbuf	= xfs_buf_item_pushbuf,
	.iop_committing = xfs_buf_item_committing
};


/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces
	 * the buffer can be divided into.  Make sure not to
	 * truncate any pieces.  map_size is the size of the
	 * bitmap needed to describe the chunks of the buffer.
	 */
	chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);

	bip = (xfs_buf_log_item_t *)kmem_zone_zalloc(xfs_buf_item_zone,
						     KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);
	bip->bli_format.blf_type = XFS_LI_BUF;
	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
	bip->bli_format.blf_map_size = map_size;

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp));
	bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}
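
/*
 * Worked example (illustration added for clarity, not in the original
 * source): for a 4096 byte buffer, chunks = (4096 + 127) >> 7 = 32 and
 * map_size = (32 + 32) >> 5 = 2, i.e. two bitmap words describe the
 * buffer.  Note the (chunks + NBWORD) rounding above can over-allocate
 * by one word compared with an exact DIV_ROUND_UP(chunks, NBWORD).
 */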


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	first_bit;
	uint	last_bit;
	uint	bits_to_set;
	uint	bits_set;
	uint	word_num;
	uint	*wordp;
	uint	bit;
	uint	end_bit;
	uint	mask;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &(bip->bli_format.blf_data_map[word_num]);

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}
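
/*
 * Worked example (illustration added for clarity, not in the original
 * source): xfs_buf_item_log(bip, 256, 1023) gives first_bit = 2,
 * last_bit = 7 and bits_to_set = 6.  All six bits land in word 0 with a
 * starting bit of 2, so the first-word mask is ((1 << 6) - 1) << 2 =
 * 0xfc, marking chunks 2-7 (bytes 256-1023) dirty in one step.
 */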


/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = bp->b_fspriv;
	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		XFS_BUF_SUPER_STALE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
			xfs_buf_target_name(bp->b_target),
			(__uint64_t)XFS_BUF_ADDR(bp));
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * During sync or umount we'll write all pending buffers again
	 * synchronously, which will catch these errors if they keep hanging
	 * around.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			XFS_BUF_DELAYWRITE(bp);
			XFS_BUF_DONE(bp);
		}
		ASSERT(bp->b_iodone != NULL);
		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
		xfs_buf_relse(bp);
		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	XFS_BUF_STALE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_UNDELAYWRITE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already.  That's because we simulate the
	 * log-committed callbacks to unpin these buffers.  Or we may never
	 * have put this item on AIL because the transaction was
	 * aborted forcibly.  xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip);
	xfs_buf_item_free(BUF_ITEM(lip));
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

static void xfs_buf_item_done(struct xfs_buf *bp);

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}
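
/*
 * Worked example (illustration added for clarity, not in the original
 * source, and assuming the usual layout with blf_data_map at byte
 * offset 16): a recovered iovec with i_len = 20 passes the header check
 * and can hold exactly one bitmap word, so blf_map_size = 1 is accepted
 * while blf_map_size = 2 would make bmp_end exceed item_end and be
 * rejected as a corrupt or malicious log item.
 */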

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

/*
 * Compute the number of log iovecs and bytes needed to log the given
 * segment of a buf log item.  The counts are accumulated into *nvecs
 * and *nbytes.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				next_bit;
	int				last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged.  This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer.  This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in.  This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty.  Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
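
/*
 * Illustration (added for clarity, not in the original source): two
 * chunks can be adjacent in the buffer's logical address space yet live
 * in different memory ranges of a discontiguous buffer, in which case
 * xfs_buf_offset() does not advance by exactly XFS_BLF_CHUNK between
 * them.  xfs_buf_item_straddle() detects that boundary so the format
 * loop below emits separate iovecs instead of one spanning copy.
 */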

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory.  This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_iflush_done(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer.  Check
		 * for the race condition here so xfsaild recognizes the buffer
		 * is pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
			"Failing async write on buffer block 0x%llx. Retrying async write.",
			(long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action.  This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes.  In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean.  An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.  If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale.  Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call.  The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into.  Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer.  This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}


/*
 * Mark bytes first through last inclusive as dirty in the given
 * bitmap segment.  The offsets are relative to the start of the segment.
 */
786static void
787xfs_buf_item_log_segment(
788 uint first,
789 uint last,
790 uint *map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
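
/*
 * Worked example for the segment logger above (hypothetical offsets):
 * logging bytes 100 through 1000 with XFS_BLF_SHIFT = 7 gives first_bit = 0
 * and last_bit = 7, so bits_to_set = 8; bit is 0, the whole-word loop is
 * skipped, and the final partial word sets mask = (1U << 8) - 1 in word 0.
 */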

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * Walk each buffer segment and mark the logged range dirty in
	 * its bitmap.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
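
/*
 * Worked example for the walk above (hypothetical geometry): with two
 * 4096 byte segments, logging buffer offsets 4000 through 4200 marks
 * bytes 4000-4095 of segment 0, then bytes 0-104 of segment 1 once the
 * offsets have been made segment relative.
 */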

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Decide if we're going to fail this write attempt outright without
 * retrying it, either because retries are pointless or because the caller
 * will process the error itself.
 */
static bool
xfs_buf_ioerror_fail_without_retry(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	static ulong		lasttime;
	static struct xfs_buftarg *lasttarg;

	/*
	 * If we've already decided to shut down the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return true;

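	/*
	 * Rate limit the console alert: report at most once every five
	 * seconds for repeated failures on the same target.
	 */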
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		return true;
	return false;
}

static bool
xfs_buf_ioerror_retry(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
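	/*
	 * A buffer that has already been through a failed retry (stale or
	 * XBF_WRITE_FAIL set) and fails again with the same error code does
	 * not get another quick retry here.
	 */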
	if ((bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) &&
	    bp->b_last_error == bp->b_error)
		return false;

	bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
	bp->b_last_error = bp->b_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    !bp->b_first_retry_time)
		bp->b_first_retry_time = jiffies;
	return true;
}

/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		return true;

	return false;
}
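
/*
 * For example (hypothetical configuration): with cfg->max_retries = 2, the
 * first two trips through this function return false and the third declares
 * the failure permanent, since ++bp->b_retries then exceeds max_retries.
 */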

/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error. If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to
 * the error configuration we have been set up to use.
 *
 * Multi-state return value:
 *
 * XBF_IOERROR_FINISH: clear IO error retry state and run callback completions
 * XBF_IOERROR_DONE: resubmitted immediately, do not run any completions
 * XBF_IOERROR_FAIL: transient error, run failure callback completions and then
 *                   release the buffer
 */
enum {
	XBF_IOERROR_FINISH,
	XBF_IOERROR_DONE,
	XBF_IOERROR_FAIL,
};

static int
xfs_buf_iodone_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	if (xfs_buf_ioerror_fail_without_retry(bp))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (xfs_buf_ioerror_retry(bp, cfg)) {
		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return XBF_IOERROR_DONE;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	return XBF_IOERROR_FAIL;

out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return XBF_IOERROR_FINISH;
}
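
/*
 * Putting the pieces together (a sketch of the common flow): the first
 * async failure clears the error and resubmits the buffer (DONE); repeated
 * failures inside the configured retry budget return FAIL so the caller can
 * mark the attached items failed; exhausting the budget forces a shutdown
 * and stales the buffer (FINISH).
 */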

static void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return;

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was forcibly aborted.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 */
	xfs_trans_ail_delete(&bip->bli_item, SHUTDOWN_CORRUPT_INCORE);
	bp->b_log_item = NULL;
	xfs_buf_item_free(bip);
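	/* Drop the buffer reference taken in xfs_buf_item_init(). */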
	xfs_buf_rele(bp);
}

static inline void
xfs_buf_clear_ioerror_retry_state(
	struct xfs_buf		*bp)
{
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;
}

/*
 * Inode buffer iodone callback function.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			set_bit(XFS_LI_FAILED, &lip->li_flags);
		}
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_iflush_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dquot buffer iodone callback function.
 */
void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		spin_lock(&bp->b_mount->m_ail->ail_lock);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			xfs_set_li_failed(lip, bp);
		}
		spin_unlock(&bp->b_mount->m_ail->ail_lock);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	/* a newly allocated dquot buffer might have a log item attached */
	xfs_buf_item_done(bp);
	xfs_dquot_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dirty buffer iodone callback function.
 *
 * Note that for things like remote attribute buffers, there may not be a
 * buffer log item here, so processing the buffer log item must remain
 * optional.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		ASSERT(list_empty(&bp->b_li_list));
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_buf_ioend_finish(bp);
}