// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"

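/*
 * This file implements the rmap update intent (RUI) and rmap update done
 * (RUD) log items.  An RUI is logged before a set of reverse mapping
 * btree updates is made and an RUD is logged once they complete, so that
 * log recovery can redo any updates that did not finish.
 */
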
kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

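/* Convert a generic log item back to the RUI that embeds it. */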
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

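/*
 * Free an RUI.  Items with more than XFS_RUI_MAX_FAST_EXTENTS extents were
 * allocated from the heap in xfs_rui_init(); everything else came from the
 * RUI zone.
 */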
void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_zone_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

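/*
 * Report the number of iovecs and bytes needed to log the given RUI item:
 * a single iovec covering the variable-size RUI log format structure.
 */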
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * If the transaction that owns the RUI is cancelled, the RUI has either
 * already been committed or is being aborted. In the cancellation case no
 * RUD is going to be constructed, so drop the RUI reference here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, 0);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
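	/*
	 * The RUI starts with two references: one dropped when the matching
	 * RUD is committed (or the transaction is cancelled), and one dropped
	 * when the log is done with the item at unpin time.
	 */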
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf into the destination RUI
 * format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len)
		return -EFSCORRUPTED;

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

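/* Convert a generic log item back to the RUD that embeds it. */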
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

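/*
 * Report the number of iovecs and bytes needed to log the given RUD item:
 * a single iovec covering the fixed-size RUD log format structure.
 */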
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD carries no extent array, so there
 * are no slots to assert against here.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * If the transaction is cancelled, the RUD has either already been
 * committed or is being aborted. In the cancellation case, drop our
 * reference to the RUI and free the RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_zone_free(xfs_rud_zone, rudp);
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

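/*
 * Allocate an RUD, point it at the given RUI, record the RUI's id so that
 * log recovery can pair the two, and add the RUD to the transaction.
 */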
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Add the new intent item to the transaction so that it is
	 * logged at commit time.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_rui_log_item		*ruip = intent;
	struct xfs_rmap_intent		*rmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, intent);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
	xfs_rui_release(intent);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

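/*
 * Hooks into the deferred operation machinery: ->create_intent logs an RUI
 * covering up to ->max_items rmap updates, ->create_done attaches the RUD,
 * ->finish_item applies one update to the rmap btree, and ->abort_intent
 * drops the RUI reference if the transaction is cancelled.
 */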
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

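/*
 * A minimal usage sketch (not part of this file): a caller that has filled
 * out a struct xfs_rmap_intent *ri queues it as deferred work roughly like
 * so, and the deferred-ops machinery invokes the hooks above as the
 * transaction commits and rolls:
 *
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
 */
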
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EIO;
		}
	}

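	/*
	 * The itruncate reservation is presumed large enough for the rmap
	 * updates, and mp->m_rmap_maxlevels blocks are reserved to cover a
	 * worst-case split of the rmap btree.
	 */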
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}