/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}


#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
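/*
 * Worked example (illustrative): with the usual constants
 * (XFS_BLF_SHIFT == 7, BIT_TO_WORD_SHIFT == 5, NBWORD == 32),
 * byte offset 300 maps to
 *
 *	chunk_num = 300 >> 7 = 2	(third 128-byte chunk)
 *	word_num  = 2 >> 5   = 0	(first word of blf_data_map)
 *	bit_num   = 2 & 31   = 2
 *
 * so the loop below asserts that bit 2 of blf_data_map[0] was set
 * when the caller logged that byte.
 */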
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLF_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	uint			nbytes;

	if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
		return;

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(XFS_BUF_COUNT(bp) > 0);
	ASSERT(bp->b_addr != NULL);
	orig = bip->bli_orig;
	buffer = bp->b_addr;
	for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
			xfs_emerg(bp->b_mount,
				"%s: bip %p buffer %p orig %p index %d",
				__func__, bip, bp, orig, x);
			ASSERT(0);
		}
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
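/*
 * Worked example (illustrative): with 128-byte chunks, a data map with
 * bits {0, 1, 5} set (all chunks contiguous in memory) yields 3 iovecs:
 * one for the format structure, one for the 256-byte run covering
 * chunks 0-1, and one for the 128 bytes of chunk 5.
 */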
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		return 1;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
	nvecs = 1;
	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				bip->bli_format.blf_map_size, 0);
	ASSERT(last_bit != -1);
	nvecs++;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			last_bit = -1;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
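/*
 * Illustrative layout: for the example bitmap above with bits {0, 1, 5}
 * set, the vector array is filled in as
 *
 *	vecp[0]: the xfs_buf_log_format structure  (XLOG_REG_TYPE_BFORMAT)
 *	vecp[1]: buffer offset 0,   length 256	   (XLOG_REG_TYPE_BCHUNK)
 *	vecp[2]: buffer offset 640, length 128	   (XLOG_REG_TYPE_BCHUNK)
 *
 * and blf_size is set to 3 to match.
 */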
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	uint			nvecs;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;
	uint			buffer_offset;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * The size of the base structure is the size of the
	 * declared structure plus the space for the extra words
	 * of the bitmap.  We subtract one from the map size, because
	 * the first element of the bitmap is accounted for in the
	 * size of the base structure.
	 */
	base_size =
		(uint)(sizeof(xfs_buf_log_format_t) +
		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
	vecp->i_addr = &bip->bli_format;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state. We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		bip->bli_format.blf_size = nvecs;
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				 bip->bli_format.blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
			   (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			/*
			 * You would think we need to bump nvecs here too,
			 * but we do not: this count is used by recovery,
			 * and it gets confused by the boundary split here.
			 *			nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
	xfs_buf_item_log_check(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	}
}

/*
 * This is called to attempt to lock the buffer associated with this
 * buf log item.  Don't sleep on the buffer lock.  If we can't get
 * the lock right away, return 0.  If we can get the lock, take a
 * reference to the buffer. If this is a delayed write buffer that
 * needs AIL help to be written back, invoke the pushbuf routine
 * rather than the normal success path.
 */
STATIC uint
xfs_buf_item_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	/* take a reference to the buffer. */
	xfs_buf_hold(bp);

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	trace_xfs_buf_item_trylock(bip);
	if (XFS_BUF_ISDELAYWRITE(bp))
		return XFS_ITEM_PUSHBUF;
	return XFS_ITEM_SUCCESS;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.
	 */
	if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
			     bip->bli_format.blf_map_size))
		xfs_buf_item_relse(bp);
	else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

/*
 * The buffer is locked, but is not a delayed write buffer. This happens
 * if we race with IO completion and hence we don't want to try to write it
 * again. Just release the buffer.
 */
STATIC void
xfs_buf_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_push(bip);

	xfs_buf_relse(bp);
}

/*
 * The buffer is locked and is a delayed write buffer. Promote the buffer
 * in the delayed write queue as the caller knows that they must invoke
 * the xfsbufd to get this buffer written. We have to unlock the buffer
 * to allow the xfsbufd to write it, too.
 */
STATIC bool
xfs_buf_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_pushbuf(bip);

	xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
	return true;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_trylock	= xfs_buf_item_trylock,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_pushbuf	= xfs_buf_item_pushbuf,
	.iop_committing = xfs_buf_item_committing
};


/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces
	 * the buffer can be divided into. Make sure not to
	 * truncate any pieces.  map_size is the size of the
	 * bitmap needed to describe the chunks of the buffer.
	 */
	chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
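	/*
	 * Worked example (illustrative): with the usual constants
	 * (XFS_BLF_CHUNK == 128, XFS_BLF_SHIFT == 7, NBWORD == 32,
	 * BIT_TO_WORD_SHIFT == 5), a 4096 byte buffer gives
	 *
	 *	chunks   = (4096 + 127) >> 7 = 32
	 *	map_size = (32 + 32) >> 5    = 2
	 *
	 * Note that the map size calculation rounds up generously; the
	 * 32 chunk bits here would fit in a single bitmap word.
	 */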

	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
						    KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);
	bip->bli_format.blf_type = XFS_LI_BUF;
	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
	bip->bli_format.blf_map_size = map_size;

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp));
	bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
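/*
 * Worked example (illustrative): logging bytes 256 through 1023 with
 * 128-byte chunks gives first_bit = 2, last_bit = 7, bits_to_set = 6.
 * The range starts mid-word (bit = 2), so the first partial-word step
 * computes
 *
 *	end_bit = MIN(2 + 6, 32)      = 8
 *	mask    = ((1 << 6) - 1) << 2 = 0xfc
 *
 * setting bits 2-7 of blf_data_map[0] in a single store; the whole-word
 * and trailing partial-word steps then have nothing left to do.
 */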
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &(bip->bli_format.blf_data_map[word_num]);

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}


/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = bp->b_fspriv;
	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
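/*
 * Illustrative result: starting from xfs_buf_item_init(), which leaves
 * b_fspriv pointing at the buf log item (bli), attaching items A and
 * then B produces
 *
 *	b_fspriv -> bli -> B -> A -> NULL
 *
 * with the links held in the li_bio_list fields: the bli stays first
 * and each newly attached item is inserted directly behind it.
 */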
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list: it removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		XFS_BUF_SUPER_STALE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
			xfs_buf_target_name(bp->b_target),
			(__uint64_t)XFS_BUF_ADDR(bp));
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * During sync or umount we'll write all pending buffers again
	 * synchronously, which will catch these errors if they keep hanging
	 * around.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			XFS_BUF_DELAYWRITE(bp);
			XFS_BUF_DONE(bp);
		}
		ASSERT(bp->b_iodone != NULL);
		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
		xfs_buf_relse(bp);
		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	XFS_BUF_STALE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_UNDELAYWRITE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on AIL because the transaction was
	 * aborted forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip);
	xfs_buf_item_free(BUF_ITEM(lip));
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"


struct kmem_cache	*xfs_buf_item_cache;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
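
/*
 * Illustrative example: a format with blf_map_size == 2 occupies
 * offsetof(struct xfs_buf_log_format, blf_data_map) plus two 4-byte
 * bitmap words, which is what goes to the log rather than the full
 * in-memory structure with its maximally sized data map.
 */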

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	int			nbits)
{
	void			*first, *last;

	first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
	last = xfs_buf_offset(bp,
			offset + ((first_bit + nbits) << XFS_BLF_SHIFT));

	if (last - first != nbits * XFS_BLF_CHUNK)
		return true;
	return false;
}
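
/*
 * Illustrative example: in an unmapped buffer the backing pages are not
 * virtually contiguous, so with 4096-byte pages and 128-byte chunks a
 * run such as first_bit = 30, nbits = 4 (bytes 3840-4351) crosses from
 * page 0 into page 1. xfs_buf_offset() then returns addresses in two
 * different pages, the pointer difference is not 4 * XFS_BLF_CHUNK, and
 * the run must be split rather than logged as a single iovec.
 */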

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 */
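/*
 * Worked example (illustrative): for a mapped segment with dirty bits
 * {0, 1, 5}, the fast path below accounts one iovec plus the format
 * size for the header, then one 256-byte run (chunks 0-1) and one
 * 128-byte run (chunk 5): *nvecs grows by 3 in total and *nbytes by
 * the format size plus 384 bytes.
 */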
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	uint				offset,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				first_bit;
	int				nbits;
	int				next_bit;
	int				last_bit;

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (first_bit == -1)
		return;

	(*nvecs)++;
	*nbytes += xfs_buf_log_format_size(blfp);

	do {
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		(*nvecs)++;
		*nbytes += nbits * XFS_BLF_CHUNK;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					 (uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	/* Count the first bit we jumped out of the above loop from */
	(*nvecs)++;
	*nbytes += XFS_BLF_CHUNK;
	last_bit = first_bit;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			last_bit = next_bit;
			first_bit = next_bit;
			(*nvecs)++;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
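/*
 * Illustrative example: invalidating a compound buffer with two dirty
 * segments (bli_format_count == 2) logs only the two cancelled format
 * structures: *nvecs grows by 2 and *nbytes by the two format sizes,
 * and none of the previously logged chunk data is relogged.
 */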
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			i;
	int			bytes;
	uint			offset = 0;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it as we are never
		 * going to replay the changes tracked in the log item.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it. It is not being
		 * included in the transaction commit, so no vectors are used at
		 * all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	bytes = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
					  nvecs, &bytes);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Round up the buffer size required to minimise the number of memory
	 * allocations that need to be done as this item grows when relogged by
	 * repeated modifications.
	 */
	*nbytes = round_up(bytes, 512);
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	do {
		ASSERT(first_bit >= 0);
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
					first_bit, nbits);
		blfp->blf_size++;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					 (uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	ASSERT(bp->b_addr == NULL);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_has_v3inodes(lip->li_log->l_mp) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We take a reference to the buffer log item here so that the BLI life cycle
 * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and
 * inserted into the AIL.
 *
 * We also need to take a reference to the buffer itself as the BLI unpin
 * processing requires accessing the buffer after the BLI has dropped the final
 * BLI reference.  See xfs_buf_item_unpin() for an explanation.
 * If unpins race to drop the final BLI reference and only the BLI owns a
 * reference to the buffer, then the loser of the race can have the buffer
 * freed from under it (e.g. on shutdown).  Taking a buffer reference per pin
 * count ensures the life cycle of the buffer extends for as long as we hold
 * the buffer pin reference in xfs_buf_item_unpin().
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	xfs_buf_hold(bip->bli_buf);
	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which was
 * previously pinned with a call to xfs_buf_item_pin().  We enter this function
 * with a buffer pin count, a buffer reference and a BLI reference.
 *
 * We must drop the BLI reference before we unpin the buffer because the AIL
 * doesn't acquire a BLI reference whenever it accesses it. Therefore if the
 * refcount drops to zero, the bli could still be AIL resident and the buffer
 * submitted for I/O at any point before we return. This can result in IO
 * completion freeing the buffer while we are still trying to access it here.
 * This race condition can also occur in shutdown situations where we abort and
 * unpin buffers from contexts other than journal IO completion.
 *
 * Hence we have to hold a buffer reference per pin count to ensure that the
 * buffer cannot be freed until we have finished processing the unpin operation.
 * The reference is taken in xfs_buf_item_pin(), and we must hold it until we
 * are done processing the buffer state. In the case of an abort (remove =
 * true) then we re-use the current pin reference as the IO reference we hand
 * off to IO failure handling.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	/*
	 * Nothing to do but drop the buffer pin reference if the BLI is
	 * still active.
	 */
	if (!freed) {
		xfs_buf_rele(bp);
		return;
	}

	if (stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(list_empty(&lip->li_trans));
		ASSERT(!bp->b_transp);

		trace_xfs_buf_item_unpin_stale(bip);

		/*
		 * The buffer has been locked and referenced since it was marked
		 * stale so we own both lock and reference exclusively here. We
		 * do not need the pin reference any more, so drop it now so
		 * that we only have one reference to drop once item completion
		 * processing is complete.
		 */
		xfs_buf_rele(bp);

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
		return;
	}

	if (remove) {
		/*
		 * We need to simulate an async IO failure here to ensure that
		 * the correct error completion is run on this buffer. This
		 * requires a reference to the buffer and for the buffer to be
		 * locked. We can safely pass ownership of the pin reference to
		 * the IO to ensure that nothing can free the buffer while we
		 * wait for the lock and then run the IO failure completion.
		 */
		xfs_buf_lock(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
		return;
	}

	/*
	 * BLI has no more active references - it will be moved to the AIL to
	 * manage the remaining BLI/buffer life cycle. There is nothing left for
	 * us to do here so drop the pin reference to the buffer.
	 */
	xfs_buf_rele(bp);
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)xfs_buf_daddr(bp));
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
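/*
 * Outcome of the final reference drop below, for illustration:
 *
 *	dirty	aborted		result
 *	-----	-------		------
 *	no	no		bli freed (clean and unused)
 *	no	yes		deleted from the AIL, bli freed
 *	yes	no		bli kept; freed at writeback completion
 *	yes	yes		deleted from the AIL, bli freed
 */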
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  xlog_is_shutdown(lip->li_log);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item. If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count. It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now. This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer. This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
744
745STATIC void
746xfs_buf_item_committing(
747 struct xfs_log_item *lip,
748 xfs_csn_t seq)
749{
750 return xfs_buf_item_release(lip);
751}
752
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 *
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters. For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery. If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log. We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
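
/*
 * Worked example with hypothetical LSNs: an inode allocation buffer is
 * first committed in a transaction whose commit record lands at LSN 100,
 * so li_lsn == 100. It is then relogged with only the di_next_unlinked
 * updates in a checkpoint that lands at LSN 200. XFS_BLI_INODE_ALLOC_BUF
 * is still set, so iop_committed returns 100 rather than 200 and the
 * original inode images stay inside the active region of the log until
 * the buffer itself is written back.
 */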

#ifdef DEBUG_EXPENSIVE
static int
xfs_buf_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_mount	*mp = bp->b_mount;
	xfs_failaddr_t		fa;

	if (!bp->b_ops || !bp->b_ops->verify_struct)
		return 0;
	if (bip->bli_flags & XFS_BLI_STALE)
		return 0;

	fa = bp->b_ops->verify_struct(bp);
	if (fa) {
		xfs_buf_verifier_error(bp, -EFSCORRUPTED, bp->b_ops->name,
				bp->b_addr, BBTOB(bp->b_length), fa);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(fa == NULL);
	}

	return 0;
}
#else
# define xfs_buf_item_precommit	NULL
#endif
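
/*
 * Build note (an assumption about intent, not a verified config option):
 * defining DEBUG_EXPENSIVE at compile time wires the structure verifier
 * into every transaction commit through ->iop_precommit in the ops table
 * below, so in-memory corruption is caught while the buffer is being
 * committed rather than later at writeback time.
 */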

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_precommit	= xfs_buf_item_precommit,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kzalloc(count * sizeof(struct xfs_buf_log_format),
				GFP_KERNEL | __GFP_NOFAIL);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kfree(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}
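
/*
 * Design note: the common single-mapping case points bli_formats at the
 * bli's embedded __bli_format rather than allocating an array, which is
 * why xfs_buf_item_free_format() only frees bli_formats when it is not
 * the embedded copy.
 */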

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_cache, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
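
/*
 * Worked sizing example, assuming the usual constants (XFS_BLF_CHUNK == 128,
 * NBWORD == 32): a single-mapping 4k buffer has bm_len == 8 basic blocks,
 * so BBTOB(8) == 4096 bytes, chunks == DIV_ROUND_UP(4096, 128) == 32 and
 * map_size == DIV_ROUND_UP(32, 32) == 1 bitmap word. A 64k buffer needs
 * 512 chunks and 16 words, which (assuming XFS_MAX_BLOCKSIZE == 64k) is
 * exactly XFS_BLF_DATAMAP_SIZE, hence the overflow check above.
 */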


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here. That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far. End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
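
/*
 * Worked example, assuming XFS_BLF_SHIFT == 7 and NBWORD == 32: logging
 * bytes 256..1023 gives first_bit == 2, last_bit == 7, bits_to_set == 6.
 * bit == 2 is nonzero, so only the leading partial-word step runs and ORs
 * ((1U << 6) - 1) << 2 == 0xfc into map[0]. Logging bytes 0..8191 instead
 * covers bits 0..63: bit == 0, so the whole-word loop alone writes
 * 0xffffffff into map[0] and map[1] and no partial words are touched.
 */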

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
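
/*
 * Worked example for a discontiguous buffer (hypothetical geometry of two
 * 4k mappings): segment 0 covers buffer offsets 0..4095 and segment 1
 * covers 4096..8191, so
 *
 *	xfs_buf_item_log(bip, 4000, 4200);
 *
 * dirties 4000..4095 in segment 0 and then 4096..4200 in segment 1, the
 * latter handed to xfs_buf_item_log_segment() as the segment-relative
 * range 0..104.
 */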


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kvfree(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_cache, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	if (atomic_read(&bip->bli_refcount))
		return;
	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
	struct xfs_buf	*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}