// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"

struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

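/*
 * Free an RUI item, including the shadow log vector buffer attached to it by
 * the CIL. Oversized items were allocated directly and are kfreed; everything
 * else goes back to the slab cache.
 */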
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}

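/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given rui item.
 */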
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount	*mp,
	uint			nextents)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

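/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given rud item.
 */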
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}

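/* Return the RUI intent item that this RUD completes. */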
static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*map,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	map->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_rmap_intent	*ra;
	struct xfs_rmap_intent	*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);

	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip,
	struct xfs_rmap_intent	*ri)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, ri->ri_type, ri->ri_whichfork,
			ri->ri_bmap.br_state);
}

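/* Sort the deferred rmap updates if requested and log them to a new RUI. */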
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rui_log_item	*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent	*ri;

	ASSERT(count > 0);

	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	unsigned int		count)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item	*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}

/* Take a passive ref to the AG containing the space we're rmapping. */
void
xfs_rmap_update_get_group(
	struct xfs_mount	*mp,
	struct xfs_rmap_intent	*ri)
{
	xfs_agnumber_t		agno;

	agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
	ri->ri_pag = xfs_perag_intent_get(mp, agno);
}

/* Release a passive AG ref after finishing rmapping work. */
static inline void
xfs_rmap_update_put_group(
	struct xfs_rmap_intent	*ri)
{
	xfs_perag_intent_put(ri->ri_pag);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_rmap_intent	*ri;
	int			error;

	ri = container_of(item, struct xfs_rmap_intent, ri_list);

	error = xfs_rmap_finish_one(tp, ri, state);

	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_rmap_intent	*ri;

	ri = container_of(item, struct xfs_rmap_intent, ri_list);

	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}

/* Is this recovered RUI ok? */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount	*mp,
	struct xfs_map_extent	*map)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
	    !xfs_verify_ino(mp, map->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
		return false;

	return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
}

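/* Turn a recovered RUI extent record into a deferred rmap work item. */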
static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	xfs_rmap_update_get_group(mp, ri);

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}

		xfs_rui_recover_work(mp, dfp, &ruip->rui_format.rui_extents[i]);
	}

	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	struct xfs_log_item	*done_item)
{
	struct xfs_rui_log_item	*ruip;
	struct xfs_map_extent	*map;
	unsigned int		count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};

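/* Match RUI log items by the intent id recorded in the format structure. */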
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};

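/*
 * Copy a recovered RUI format structure: the fixed-size header first, then
 * each extent record. The RUI/RUD items were designed not to need any special
 * alignment handling.
 */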
static inline void
xfs_rui_copy_format(
	struct xfs_rui_log_format	*dst,
	const struct xfs_rui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));

	for (i = 0; i < src->rui_nextents; i++)
		memcpy(&dst->rui_extents[i], &src->rui_extents[i],
				sizeof(struct xfs_map_extent));
}

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};