Linux v3.1: fs/xfs/xfs_fsops.c
 
  1/*
  2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_types.h"
 21#include "xfs_bit.h"
 22#include "xfs_inum.h"
 23#include "xfs_log.h"
 24#include "xfs_trans.h"
 25#include "xfs_sb.h"
 26#include "xfs_ag.h"
 27#include "xfs_mount.h"
 28#include "xfs_bmap_btree.h"
 29#include "xfs_alloc_btree.h"
 30#include "xfs_ialloc_btree.h"
 31#include "xfs_dinode.h"
 32#include "xfs_inode.h"
 33#include "xfs_inode_item.h"
 34#include "xfs_btree.h"
 35#include "xfs_error.h"
 36#include "xfs_alloc.h"
 37#include "xfs_ialloc.h"
 38#include "xfs_fsops.h"
 39#include "xfs_itable.h"
 40#include "xfs_trans_space.h"
 41#include "xfs_rtalloc.h"
 42#include "xfs_rw.h"
 43#include "xfs_filestream.h"
 44#include "xfs_trace.h"
 45
 46/*
 47 * File system operations
 48 */
 49
 50int
 51xfs_fs_geometry(
 52	xfs_mount_t		*mp,
 53	xfs_fsop_geom_t		*geo,
 54	int			new_version)
 55{
 56
 57	memset(geo, 0, sizeof(*geo));
 58
 59	geo->blocksize = mp->m_sb.sb_blocksize;
 60	geo->rtextsize = mp->m_sb.sb_rextsize;
 61	geo->agblocks = mp->m_sb.sb_agblocks;
 62	geo->agcount = mp->m_sb.sb_agcount;
 63	geo->logblocks = mp->m_sb.sb_logblocks;
 64	geo->sectsize = mp->m_sb.sb_sectsize;
 65	geo->inodesize = mp->m_sb.sb_inodesize;
 66	geo->imaxpct = mp->m_sb.sb_imax_pct;
 67	geo->datablocks = mp->m_sb.sb_dblocks;
 68	geo->rtblocks = mp->m_sb.sb_rblocks;
 69	geo->rtextents = mp->m_sb.sb_rextents;
 70	geo->logstart = mp->m_sb.sb_logstart;
 71	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
 72	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
 73	if (new_version >= 2) {
 74		geo->sunit = mp->m_sb.sb_unit;
 75		geo->swidth = mp->m_sb.sb_width;
 76	}
 77	if (new_version >= 3) {
 78		geo->version = XFS_FSOP_GEOM_VERSION;
 79		geo->flags =
 80			(xfs_sb_version_hasattr(&mp->m_sb) ?
 81				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
 82			(xfs_sb_version_hasnlink(&mp->m_sb) ?
 83				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
 84			(xfs_sb_version_hasquota(&mp->m_sb) ?
 85				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
 86			(xfs_sb_version_hasalign(&mp->m_sb) ?
 87				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
 88			(xfs_sb_version_hasdalign(&mp->m_sb) ?
 89				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
 90			(xfs_sb_version_hasshared(&mp->m_sb) ?
 91				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
 92			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
 93				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
 94			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
 95				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
 96			(xfs_sb_version_hassector(&mp->m_sb) ?
 97				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
 98			(xfs_sb_version_hasasciici(&mp->m_sb) ?
 99				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
100			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
101				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
102			(xfs_sb_version_hasattr2(&mp->m_sb) ?
103				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0);
104		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
105				mp->m_sb.sb_logsectsize : BBSIZE;
106		geo->rtsectsize = mp->m_sb.sb_blocksize;
107		geo->dirblocksize = mp->m_dirblksize;
108	}
109	if (new_version >= 4) {
110		geo->flags |=
111			(xfs_sb_version_haslogv2(&mp->m_sb) ?
112				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
113		geo->logsunit = mp->m_sb.sb_logsunit;
114	}
115	return 0;
116}
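
The routine above backs the XFS_IOC_FSGEOMETRY family of ioctls. Below is a minimal userspace sketch of querying it — not part of this file — assuming the xfsprogs development headers (<xfs/xfs.h>) provide the ioctl number and struct xfs_fsop_geom; error handling is trimmed.

/* Hedged illustration only -- not part of xfs_fsops.c. */
#include <xfs/xfs.h>		/* assumed: xfsprogs headers for XFS_IOC_FSGEOMETRY */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static int print_geometry(const char *path)
{
	struct xfs_fsop_geom geo;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {	/* filled in by xfs_fs_geometry() */
		close(fd);
		return -1;
	}
	printf("blocksize=%u agcount=%u agblocks=%u datablocks=%llu\n",
	       geo.blocksize, geo.agcount, geo.agblocks,
	       (unsigned long long)geo.datablocks);
	close(fd);
	return 0;
}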
117
118static int
119xfs_growfs_data_private(
120	xfs_mount_t		*mp,		/* mount point for filesystem */
121	xfs_growfs_data_t	*in)		/* growfs data input struct */
122{
123	xfs_agf_t		*agf;
124	xfs_agi_t		*agi;
125	xfs_agnumber_t		agno;
126	xfs_extlen_t		agsize;
127	xfs_extlen_t		tmpsize;
128	xfs_alloc_rec_t		*arec;
129	struct xfs_btree_block	*block;
130	xfs_buf_t		*bp;
131	int			bucket;
132	int			dpct;
133	int			error;
134	xfs_agnumber_t		nagcount;
135	xfs_agnumber_t		nagimax = 0;
136	xfs_rfsblock_t		nb, nb_mod;
137	xfs_rfsblock_t		new;
138	xfs_rfsblock_t		nfree;
139	xfs_agnumber_t		oagcount;
140	int			pct;
141	xfs_trans_t		*tp;
142
143	nb = in->newblocks;
144	pct = in->imaxpct;
145	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
146		return XFS_ERROR(EINVAL);
147	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
148		return error;
149	dpct = pct - mp->m_sb.sb_imax_pct;
150	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
151				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
152				BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
153	if (!bp)
154		return EIO;
155	xfs_buf_relse(bp);
156
157	new = nb;	/* use new as a temporary here */
158	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
159	nagcount = new + (nb_mod != 0);
160	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
161		nagcount--;
162		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
163		if (nb < mp->m_sb.sb_dblocks)
164			return XFS_ERROR(EINVAL);
165	}
166	new = nb - mp->m_sb.sb_dblocks;
167	oagcount = mp->m_sb.sb_agcount;
168
169	/* allocate the new per-ag structures */
170	if (nagcount > oagcount) {
171		error = xfs_initialize_perag(mp, nagcount, &nagimax);
172		if (error)
173			return error;
174	}
175
176	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
177	tp->t_flags |= XFS_TRANS_RESERVE;
178	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
179			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
180		xfs_trans_cancel(tp, 0);
181		return error;
182	}
183
184	/*
185	 * Write new AG headers to disk. Non-transactional, but written
186	 * synchronously so they are completed prior to the growfs transaction
187	 * being logged.
188	 */
189	nfree = 0;
190	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
191		/*
192		 * AG freelist header block
193		 */
194		bp = xfs_buf_get(mp->m_ddev_targp,
195				 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
196				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
197		agf = XFS_BUF_TO_AGF(bp);
198		memset(agf, 0, mp->m_sb.sb_sectsize);
199		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
200		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
201		agf->agf_seqno = cpu_to_be32(agno);
202		if (agno == nagcount - 1)
203			agsize =
204				nb -
205				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
206		else
207			agsize = mp->m_sb.sb_agblocks;
208		agf->agf_length = cpu_to_be32(agsize);
209		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
210		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
211		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
212		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
213		agf->agf_flfirst = 0;
214		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
215		agf->agf_flcount = 0;
216		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
217		agf->agf_freeblks = cpu_to_be32(tmpsize);
218		agf->agf_longest = cpu_to_be32(tmpsize);
219		error = xfs_bwrite(mp, bp);
220		if (error) {
221			goto error0;
222		}
223		/*
224		 * AG inode header block
225		 */
226		bp = xfs_buf_get(mp->m_ddev_targp,
227				 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
228				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
229		agi = XFS_BUF_TO_AGI(bp);
230		memset(agi, 0, mp->m_sb.sb_sectsize);
231		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
232		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
233		agi->agi_seqno = cpu_to_be32(agno);
234		agi->agi_length = cpu_to_be32(agsize);
235		agi->agi_count = 0;
236		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
237		agi->agi_level = cpu_to_be32(1);
238		agi->agi_freecount = 0;
239		agi->agi_newino = cpu_to_be32(NULLAGINO);
240		agi->agi_dirino = cpu_to_be32(NULLAGINO);
241		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
242			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
243		error = xfs_bwrite(mp, bp);
244		if (error) {
245			goto error0;
246		}
247		/*
248		 * BNO btree root block
249		 */
250		bp = xfs_buf_get(mp->m_ddev_targp,
251				 XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
252				 BTOBB(mp->m_sb.sb_blocksize),
253				 XBF_LOCK | XBF_MAPPED);
254		block = XFS_BUF_TO_BLOCK(bp);
255		memset(block, 0, mp->m_sb.sb_blocksize);
256		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
257		block->bb_level = 0;
258		block->bb_numrecs = cpu_to_be16(1);
259		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
260		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
261		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
262		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
263		arec->ar_blockcount = cpu_to_be32(
264			agsize - be32_to_cpu(arec->ar_startblock));
265		error = xfs_bwrite(mp, bp);
266		if (error) {
267			goto error0;
268		}
269		/*
270		 * CNT btree root block
271		 */
272		bp = xfs_buf_get(mp->m_ddev_targp,
273				 XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
274				 BTOBB(mp->m_sb.sb_blocksize),
275				 XBF_LOCK | XBF_MAPPED);
276		block = XFS_BUF_TO_BLOCK(bp);
277		memset(block, 0, mp->m_sb.sb_blocksize);
278		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
279		block->bb_level = 0;
280		block->bb_numrecs = cpu_to_be16(1);
281		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
282		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
283		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
284		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
285		arec->ar_blockcount = cpu_to_be32(
286			agsize - be32_to_cpu(arec->ar_startblock));
287		nfree += be32_to_cpu(arec->ar_blockcount);
288		error = xfs_bwrite(mp, bp);
289		if (error) {
290			goto error0;
291		}
292		/*
293		 * INO btree root block
294		 */
295		bp = xfs_buf_get(mp->m_ddev_targp,
296				 XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
297				 BTOBB(mp->m_sb.sb_blocksize),
298				 XBF_LOCK | XBF_MAPPED);
299		block = XFS_BUF_TO_BLOCK(bp);
300		memset(block, 0, mp->m_sb.sb_blocksize);
301		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
302		block->bb_level = 0;
303		block->bb_numrecs = 0;
304		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
305		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
306		error = xfs_bwrite(mp, bp);
307		if (error) {
308			goto error0;
309		}
310	}
311	xfs_trans_agblocks_delta(tp, nfree);
312	/*
313	 * There are new blocks in the old last a.g.
314	 */
315	if (new) {
316		/*
317		 * Change the agi length.
318		 */
319		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
320		if (error) {
321			goto error0;
322		}
323		ASSERT(bp);
324		agi = XFS_BUF_TO_AGI(bp);
325		be32_add_cpu(&agi->agi_length, new);
326		ASSERT(nagcount == oagcount ||
327		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
328		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
329		/*
330		 * Change agf length.
331		 */
332		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
333		if (error) {
334			goto error0;
335		}
336		ASSERT(bp);
337		agf = XFS_BUF_TO_AGF(bp);
338		be32_add_cpu(&agf->agf_length, new);
339		ASSERT(be32_to_cpu(agf->agf_length) ==
340		       be32_to_cpu(agi->agi_length));
341
342		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
343		/*
344		 * Free the new space.
345		 */
346		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
347			be32_to_cpu(agf->agf_length) - new), new);
348		if (error) {
349			goto error0;
350		}
351	}
352
353	/*
354	 * Update changed superblock fields transactionally. These are not
355	 * seen by the rest of the world until the transaction commit applies
356	 * them atomically to the superblock.
357	 */
358	if (nagcount > oagcount)
359		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
360	if (nb > mp->m_sb.sb_dblocks)
361		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
362				 nb - mp->m_sb.sb_dblocks);
363	if (nfree)
364		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
365	if (dpct)
366		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
367	error = xfs_trans_commit(tp, 0);
368	if (error)
369		return error;
370
371	/* New allocation groups fully initialized, so update mount struct */
372	if (nagimax)
373		mp->m_maxagi = nagimax;
374	if (mp->m_sb.sb_imax_pct) {
375		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
376		do_div(icount, 100);
377		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
378	} else
379		mp->m_maxicount = 0;
380	xfs_set_low_space_thresholds(mp);
381
382	/* update secondary superblocks. */
383	for (agno = 1; agno < nagcount; agno++) {
384		error = xfs_read_buf(mp, mp->m_ddev_targp,
385				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
386				  XFS_FSS_TO_BB(mp, 1), 0, &bp);
387		if (error) {
388			xfs_warn(mp,
389		"error %d reading secondary superblock for ag %d",
390				error, agno);
391			break;
392		}
393		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
394		/*
395		 * If we get an error writing out the alternate superblocks,
396		 * just issue a warning and continue.  The real work is
397		 * already done and committed.
398		 */
399		if (!(error = xfs_bwrite(mp, bp))) {
400			continue;
401		} else {
402			xfs_warn(mp,
403		"write error %d updating secondary superblock for ag %d",
404				error, agno);
405			break; /* no point in continuing */
406		}
407	}
408	return 0;
409
410 error0:
411	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
412	return error;
413}
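
The allocation-group arithmetic near the top of xfs_growfs_data_private() — do_div() of the requested size by sb_agblocks, then dropping a trailing runt smaller than XFS_MIN_AG_BLOCKS — can be illustrated with ordinary userspace arithmetic. A small sketch follows, with the minimum AG size hard-coded to the value used by the kernel headers (64 blocks); it is an illustration, not kernel code.

#include <stdint.h>

#define MIN_AG_BLOCKS	64	/* stand-in for XFS_MIN_AG_BLOCKS */

/* Round a requested data size (in fs blocks) to whole allocation groups. */
static uint32_t grow_ag_count(uint64_t newblocks, uint32_t agblocks,
			      uint64_t *clamped)
{
	uint64_t nagcount = newblocks / agblocks;
	uint64_t runt = newblocks % agblocks;	/* the do_div() remainder */

	if (runt)
		nagcount++;			/* partial AG at the end */
	if (runt && runt < MIN_AG_BLOCKS) {
		nagcount--;			/* runt too small: drop it */
		newblocks = nagcount * (uint64_t)agblocks;
	}
	*clamped = newblocks;
	return nagcount;
}
/*
 * Example: agblocks = 245760 and newblocks = 245790 leave a 30-block
 * runt, so the grow is clamped back to 245760 blocks in a single AG.
 */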
414
415static int
416xfs_growfs_log_private(
417	xfs_mount_t		*mp,	/* mount point for filesystem */
418	xfs_growfs_log_t	*in)	/* growfs log input struct */
419{
420	xfs_extlen_t		nb;
421
422	nb = in->newblocks;
423	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
424		return XFS_ERROR(EINVAL);
425	if (nb == mp->m_sb.sb_logblocks &&
426	    in->isint == (mp->m_sb.sb_logstart != 0))
427		return XFS_ERROR(EINVAL);
428	/*
429	 * Moving the log is hard, need new interfaces to sync
430	 * the log first, hold off all activity while moving it.
431	 * Can have shorter or longer log in the same space,
432	 * or transform internal to external log or vice versa.
433	 */
434	return XFS_ERROR(ENOSYS);
435}
436
437/*
438 * protected versions of growfs function acquire and release locks on the mount
439 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
440 * XFS_IOC_FSGROWFSRT
441 */
442
443
444int
445xfs_growfs_data(
446	xfs_mount_t		*mp,
447	xfs_growfs_data_t	*in)
448{
449	int error;
450
451	if (!capable(CAP_SYS_ADMIN))
452		return XFS_ERROR(EPERM);
453	if (!mutex_trylock(&mp->m_growlock))
454		return XFS_ERROR(EWOULDBLOCK);
455	error = xfs_growfs_data_private(mp, in);
456	mutex_unlock(&mp->m_growlock);
457	return error;
458}
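
xfs_growfs_data() is the entry point behind the XFS_IOC_FSGROWFSDATA ioctl, which is how xfs_growfs(8) grows the data section. A hedged userspace sketch, again assuming the xfsprogs headers for the ioctl number and struct xfs_growfs_data; the descriptor must refer to the mounted filesystem and the caller needs CAP_SYS_ADMIN.

#include <xfs/xfs.h>		/* assumed: XFS_IOC_FSGROWFSDATA, struct xfs_growfs_data */
#include <sys/ioctl.h>

static int grow_data(int mnt_fd, unsigned long long newblocks,
		     unsigned int imaxpct)
{
	struct xfs_growfs_data in = {
		.newblocks = newblocks,	/* new total data size, in fs blocks */
		.imaxpct   = imaxpct,	/* max % of space usable by inodes */
	};

	/* ends up in xfs_growfs_data() above */
	return ioctl(mnt_fd, XFS_IOC_FSGROWFSDATA, &in);
}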
459
460int
461xfs_growfs_log(
462	xfs_mount_t		*mp,
463	xfs_growfs_log_t	*in)
464{
465	int error;
466
467	if (!capable(CAP_SYS_ADMIN))
468		return XFS_ERROR(EPERM);
469	if (!mutex_trylock(&mp->m_growlock))
470		return XFS_ERROR(EWOULDBLOCK);
471	error = xfs_growfs_log_private(mp, in);
472	mutex_unlock(&mp->m_growlock);
473	return error;
474}
475
476/*
477 * exported through ioctl XFS_IOC_FSCOUNTS
478 */
479
480int
481xfs_fs_counts(
482	xfs_mount_t		*mp,
483	xfs_fsop_counts_t	*cnt)
484{
485	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
486	spin_lock(&mp->m_sb_lock);
487	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
488	cnt->freertx = mp->m_sb.sb_frextents;
489	cnt->freeino = mp->m_sb.sb_ifree;
490	cnt->allocino = mp->m_sb.sb_icount;
491	spin_unlock(&mp->m_sb_lock);
492	return 0;
493}
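
xfs_fs_counts() serves the XFS_IOC_FSCOUNTS ioctl. A sketch of reading the counters from userspace, assuming the xfsprogs headers for the ioctl number and struct xfs_fsop_counts:

#include <xfs/xfs.h>		/* assumed: XFS_IOC_FSCOUNTS, struct xfs_fsop_counts */
#include <sys/ioctl.h>
#include <stdio.h>

static int print_counts(int mnt_fd)
{
	struct xfs_fsop_counts cnt;

	if (ioctl(mnt_fd, XFS_IOC_FSCOUNTS, &cnt) < 0)
		return -1;
	printf("free data blocks %llu, free inodes %llu, allocated inodes %llu\n",
	       (unsigned long long)cnt.freedata,
	       (unsigned long long)cnt.freeino,
	       (unsigned long long)cnt.allocino);
	return 0;
}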
494
495/*
496 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
497 *
498 * xfs_reserve_blocks is called to set m_resblks
499 * in the in-core mount table. The number of unused reserved blocks
500 * is kept in m_resblks_avail.
501 *
502 * Reserve the requested number of blocks if available. Otherwise return
503 * as many as possible to satisfy the request. The actual number
504 * reserved are returned in outval
505 *
506 * A null inval pointer indicates that only the current reserved blocks
507 * available  should  be returned no settings are changed.
508 */
509
510int
511xfs_reserve_blocks(
512	xfs_mount_t             *mp,
513	__uint64_t              *inval,
514	xfs_fsop_resblks_t      *outval)
515{
516	__int64_t		lcounter, delta, fdblks_delta;
517	__uint64_t		request;
518
519	/* If inval is null, report current values and return */
520	if (inval == (__uint64_t *)NULL) {
521		if (!outval)
522			return EINVAL;
523		outval->resblks = mp->m_resblks;
524		outval->resblks_avail = mp->m_resblks_avail;
525		return 0;
526	}
527
528	request = *inval;
529
530	/*
531	 * With per-cpu counters, this becomes an interesting
 532	 * problem. We need to work out if we are freeing or allocating
533	 * blocks first, then we can do the modification as necessary.
534	 *
535	 * We do this under the m_sb_lock so that if we are near
536	 * ENOSPC, we will hold out any changes while we work out
537	 * what to do. This means that the amount of free space can
538	 * change while we do this, so we need to retry if we end up
539	 * trying to reserve more space than is available.
540	 *
541	 * We also use the xfs_mod_incore_sb() interface so that we
542	 * don't have to care about whether per cpu counter are
543	 * enabled, disabled or even compiled in....
544	 */
545retry:
546	spin_lock(&mp->m_sb_lock);
547	xfs_icsb_sync_counters_locked(mp, 0);
548
549	/*
550	 * If our previous reservation was larger than the current value,
551	 * then move any unused blocks back to the free pool.
552	 */
553	fdblks_delta = 0;
554	if (mp->m_resblks > request) {
555		lcounter = mp->m_resblks_avail - request;
556		if (lcounter  > 0) {		/* release unused blocks */
557			fdblks_delta = lcounter;
558			mp->m_resblks_avail -= lcounter;
559		}
560		mp->m_resblks = request;
561	} else {
562		__int64_t	free;
563
564		free =  mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
565		if (!free)
566			goto out; /* ENOSPC and fdblks_delta = 0 */
567
568		delta = request - mp->m_resblks;
569		lcounter = free - delta;
570		if (lcounter < 0) {
571			/* We can't satisfy the request, just get what we can */
572			mp->m_resblks += free;
573			mp->m_resblks_avail += free;
574			fdblks_delta = -free;
575		} else {
576			fdblks_delta = -delta;
577			mp->m_resblks = request;
578			mp->m_resblks_avail += delta;
579		}
580	}
581out:
582	if (outval) {
583		outval->resblks = mp->m_resblks;
584		outval->resblks_avail = mp->m_resblks_avail;
585	}
586	spin_unlock(&mp->m_sb_lock);
587
588	if (fdblks_delta) {
589		/*
590		 * If we are putting blocks back here, m_resblks_avail is
591		 * already at its max so this will put it in the free pool.
592		 *
593		 * If we need space, we'll either succeed in getting it
594		 * from the free block count or we'll get an enospc. If
595		 * we get a ENOSPC, it means things changed while we were
596		 * calculating fdblks_delta and so we should try again to
597		 * see if there is anything left to reserve.
598		 *
599		 * Don't set the reserved flag here - we don't want to reserve
600		 * the extra reserve blocks from the reserve.....
601		 */
602		int error;
603		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
604						 fdblks_delta, 0);
605		if (error == ENOSPC)
606			goto retry;
607	}
608	return 0;
609}
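
xfs_reserve_blocks() is driven by the XFS_IOC_SET_RESBLKS / XFS_IOC_GET_RESBLKS ioctls (the same pool xfs_io's "resblks" command manipulates). A hedged sketch, assuming the xfsprogs headers; the kernel writes back how much it actually managed to reserve.

#include <xfs/xfs.h>		/* assumed: XFS_IOC_SET_RESBLKS, struct xfs_fsop_resblks */
#include <sys/ioctl.h>
#include <stdio.h>

static int set_reserve(int mnt_fd, unsigned long long blocks)
{
	struct xfs_fsop_resblks res = { .resblks = blocks };

	if (ioctl(mnt_fd, XFS_IOC_SET_RESBLKS, &res) < 0)
		return -1;
	/* res now reflects the reservation xfs_reserve_blocks() granted */
	printf("reserved %llu, available %llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);
	return 0;
}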
610
611/*
612 * Dump a transaction into the log that contains no real change. This is needed
613 * to be able to make the log dirty or stamp the current tail LSN into the log
614 * during the covering operation.
615 *
616 * We cannot use an inode here for this - that will push dirty state back up
617 * into the VFS and then periodic inode flushing will prevent log covering from
618 * making progress. Hence we log a field in the superblock instead and use a
619 * synchronous transaction to ensure the superblock is immediately unpinned
620 * and can be written back.
621 */
622int
623xfs_fs_log_dummy(
624	xfs_mount_t	*mp)
625{
626	xfs_trans_t	*tp;
627	int		error;
628
629	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
630	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
631					XFS_DEFAULT_LOG_COUNT);
632	if (error) {
633		xfs_trans_cancel(tp, 0);
634		return error;
635	}
636
637	/* log the UUID because it is an unchanging field */
638	xfs_mod_sb(tp, XFS_SB_UUID);
639	xfs_trans_set_sync(tp);
640	return xfs_trans_commit(tp, 0);
641}
642
643int
644xfs_fs_goingdown(
645	xfs_mount_t	*mp,
646	__uint32_t	inflags)
647{
648	switch (inflags) {
649	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
650		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);
651
652		if (sb && !IS_ERR(sb)) {
653			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
654			thaw_bdev(sb->s_bdev, sb);
655		}
656
657		break;
658	}
659	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
660		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
661		break;
662	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
663		xfs_force_shutdown(mp,
664				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
665		break;
666	default:
667		return XFS_ERROR(EINVAL);
668	}
669
670	return 0;
671}
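
xfs_fs_goingdown() implements the XFS_IOC_GOINGDOWN ioctl used for testing and emergency shutdown (this is what xfs_io's "shutdown" command issues). A hedged sketch, assuming the xfsprogs headers; note the flag word is passed by pointer.

#include <xfs/xfs.h>		/* assumed: XFS_IOC_GOINGDOWN and the GOING_FLAGS values */
#include <sys/ioctl.h>
#include <stdint.h>

static int shutdown_fs(int mnt_fd, int flush_log)
{
	uint32_t flags = flush_log ? XFS_FSOP_GOING_FLAGS_LOGFLUSH
				   : XFS_FSOP_GOING_FLAGS_NOLOGFLUSH;

	/* the kernel copies the flag word in and calls xfs_fs_goingdown() */
	return ioctl(mnt_fd, XFS_IOC_GOINGDOWN, &flags);
}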
Linux v6.2: fs/xfs/xfs_fsops.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_sb.h"
 13#include "xfs_mount.h"
 14#include "xfs_trans.h"
 15#include "xfs_error.h"
 16#include "xfs_alloc.h"
 17#include "xfs_fsops.h"
 18#include "xfs_trans_space.h"
 19#include "xfs_log.h"
 20#include "xfs_log_priv.h"
 21#include "xfs_ag.h"
 22#include "xfs_ag_resv.h"
 23#include "xfs_trace.h"
 24
 25/*
 26 * Write new AG headers to disk. Non-transactional, but need to be
 27 * written and completed prior to the growfs transaction being logged.
 28 * To do this, we use a delayed write buffer list and wait for
 29 * submission and IO completion of the list as a whole. This allows the
 30 * IO subsystem to merge all the AG headers in a single AG into a single
 31 * IO and hide most of the latency of the IO from us.
 32 *
 33 * This also means that if we get an error whilst building the buffer
 34 * list to write, we can cancel the entire list without having written
 35 * anything.
 36 */
 37static int
 38xfs_resizefs_init_new_ags(
 39	struct xfs_trans	*tp,
 40	struct aghdr_init_data	*id,
 41	xfs_agnumber_t		oagcount,
 42	xfs_agnumber_t		nagcount,
 43	xfs_rfsblock_t		delta,
 44	struct xfs_perag	*last_pag,
 45	bool			*lastag_extended)
 46{
 47	struct xfs_mount	*mp = tp->t_mountp;
 48	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
 49	int			error;
 50
 51	*lastag_extended = false;
 52
 53	INIT_LIST_HEAD(&id->buffer_list);
 54	for (id->agno = nagcount - 1;
 55	     id->agno >= oagcount;
 56	     id->agno--, delta -= id->agsize) {
 57
 58		if (id->agno == nagcount - 1)
 59			id->agsize = nb - (id->agno *
 60					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
 61		else
 62			id->agsize = mp->m_sb.sb_agblocks;
 63
 64		error = xfs_ag_init_headers(mp, id);
 65		if (error) {
 66			xfs_buf_delwri_cancel(&id->buffer_list);
 67			return error;
 68		}
 69	}
 70
 71	error = xfs_buf_delwri_submit(&id->buffer_list);
 72	if (error)
 73		return error;
 74
 75	if (delta) {
 76		*lastag_extended = true;
 77		error = xfs_ag_extend_space(last_pag, tp, delta);
 78	}
 79	return error;
 80}
 81
 82/*
 83 * growfs operations
 84 */
 85static int
 86xfs_growfs_data_private(
 87	struct xfs_mount	*mp,		/* mount point for filesystem */
 88	struct xfs_growfs_data	*in)		/* growfs data input struct */
 89{
 90	struct xfs_buf		*bp;
 91	int			error;
 92	xfs_agnumber_t		nagcount;
 93	xfs_agnumber_t		nagimax = 0;
 94	xfs_rfsblock_t		nb, nb_div, nb_mod;
 95	int64_t			delta;
 96	bool			lastag_extended;
 97	xfs_agnumber_t		oagcount;
 98	struct xfs_trans	*tp;
 99	struct aghdr_init_data	id = {};
100	struct xfs_perag	*last_pag;
101
102	nb = in->newblocks;
103	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
104	if (error)
105		return error;
106
107	if (nb > mp->m_sb.sb_dblocks) {
108		error = xfs_buf_read_uncached(mp->m_ddev_targp,
109				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
110				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
111		if (error)
112			return error;
113		xfs_buf_relse(bp);
114	}
115
116	nb_div = nb;
117	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
118	nagcount = nb_div + (nb_mod != 0);
119	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
120		nagcount--;
121		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
122	}
123	delta = nb - mp->m_sb.sb_dblocks;
124	/*
125	 * Reject filesystems with a single AG because they are not
126	 * supported, and reject a shrink operation that would cause a
127	 * filesystem to become unsupported.
128	 */
129	if (delta < 0 && nagcount < 2)
130		return -EINVAL;
131
132	oagcount = mp->m_sb.sb_agcount;
133	/* allocate the new per-ag structures */
134	if (nagcount > oagcount) {
135		error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
136		if (error)
137			return error;
138	} else if (nagcount < oagcount) {
139		/* TODO: shrinking the entire AGs hasn't yet completed */
140		return -EINVAL;
141	}
142
143	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
144			(delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
145			XFS_TRANS_RESERVE, &tp);
146	if (error)
147		return error;
148
149	last_pag = xfs_perag_get(mp, oagcount - 1);
150	if (delta > 0) {
151		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
152				delta, last_pag, &lastag_extended);
153	} else {
154		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
155	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
156
157		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
158	}
159	xfs_perag_put(last_pag);
160	if (error)
161		goto out_trans_cancel;
162
163	/*
164	 * Update changed superblock fields transactionally. These are not
165	 * seen by the rest of the world until the transaction commit applies
166	 * them atomically to the superblock.
167	 */
168	if (nagcount > oagcount)
169		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
170	if (delta)
171		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
172	if (id.nfree)
173		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
174
175	/*
176	 * Sync sb counters now to reflect the updated values. This is
177	 * particularly important for shrink because the write verifier
178	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
179	 */
180	if (xfs_has_lazysbcount(mp))
181		xfs_log_sb(tp);
182
183	xfs_trans_set_sync(tp);
184	error = xfs_trans_commit(tp);
185	if (error)
186		return error;
187
188	/* New allocation groups fully initialized, so update mount struct */
189	if (nagimax)
190		mp->m_maxagi = nagimax;
191	xfs_set_low_space_thresholds(mp);
192	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
193
194	if (delta > 0) {
195		/*
196		 * If we expanded the last AG, free the per-AG reservation
197		 * so we can reinitialize it with the new size.
198		 */
199		if (lastag_extended) {
200			struct xfs_perag	*pag;
201
202			pag = xfs_perag_get(mp, id.agno);
203			error = xfs_ag_resv_free(pag);
204			xfs_perag_put(pag);
205			if (error)
206				return error;
207		}
208		/*
209		 * Reserve AG metadata blocks. ENOSPC here does not mean there
210		 * was a growfs failure, just that there still isn't space for
211		 * new user data after the grow has been run.
212		 */
213		error = xfs_fs_reserve_ag_blocks(mp);
214		if (error == -ENOSPC)
215			error = 0;
216	}
217	return error;
218
219out_trans_cancel:
220	xfs_trans_cancel(tp);
221	return error;
222}
223
224static int
225xfs_growfs_log_private(
226	struct xfs_mount	*mp,	/* mount point for filesystem */
227	struct xfs_growfs_log	*in)	/* growfs log input struct */
228{
229	xfs_extlen_t		nb;
230
231	nb = in->newblocks;
232	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
233		return -EINVAL;
234	if (nb == mp->m_sb.sb_logblocks &&
235	    in->isint == (mp->m_sb.sb_logstart != 0))
236		return -EINVAL;
237	/*
238	 * Moving the log is hard, need new interfaces to sync
239	 * the log first, hold off all activity while moving it.
240	 * Can have shorter or longer log in the same space,
241	 * or transform internal to external log or vice versa.
242	 */
243	return -ENOSYS;
244}
245
246static int
247xfs_growfs_imaxpct(
248	struct xfs_mount	*mp,
249	__u32			imaxpct)
250{
251	struct xfs_trans	*tp;
252	int			dpct;
253	int			error;
254
255	if (imaxpct > 100)
256		return -EINVAL;
257
258	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
259			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
260	if (error)
261		return error;
262
263	dpct = imaxpct - mp->m_sb.sb_imax_pct;
264	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
265	xfs_trans_set_sync(tp);
266	return xfs_trans_commit(tp);
267}
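
There is no separate ioctl for this: an imaxpct-only change still arrives through XFS_IOC_FSGROWFSDATA (the path taken by xfs_growfs -m), with newblocks left at the current size so that only the xfs_growfs_imaxpct() branch above runs. A hedged sketch, assuming the xfsprogs headers:

#include <xfs/xfs.h>		/* assumed: geometry and growfs ioctls as above */
#include <sys/ioctl.h>

static int set_imaxpct(int mnt_fd, unsigned int imaxpct)
{
	struct xfs_fsop_geom geo;
	struct xfs_growfs_data in;

	if (ioctl(mnt_fd, XFS_IOC_FSGEOMETRY, &geo) < 0)
		return -1;
	in.newblocks = geo.datablocks;	/* unchanged: skip the physical grow */
	in.imaxpct = imaxpct;
	return ioctl(mnt_fd, XFS_IOC_FSGROWFSDATA, &in);
}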
268
269/*
270 * protected versions of growfs function acquire and release locks on the mount
271 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
272 * XFS_IOC_FSGROWFSRT
273 */
274int
275xfs_growfs_data(
276	struct xfs_mount	*mp,
277	struct xfs_growfs_data	*in)
278{
279	int			error = 0;
280
281	if (!capable(CAP_SYS_ADMIN))
282		return -EPERM;
283	if (!mutex_trylock(&mp->m_growlock))
284		return -EWOULDBLOCK;
285
286	/* update imaxpct separately to the physical grow of the filesystem */
287	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
288		error = xfs_growfs_imaxpct(mp, in->imaxpct);
289		if (error)
290			goto out_error;
291	}
292
293	if (in->newblocks != mp->m_sb.sb_dblocks) {
294		error = xfs_growfs_data_private(mp, in);
295		if (error)
296			goto out_error;
297	}
298
299	/* Post growfs calculations needed to reflect new state in operations */
300	if (mp->m_sb.sb_imax_pct) {
301		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
302		do_div(icount, 100);
303		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
304	} else
305		M_IGEO(mp)->maxicount = 0;
306
307	/* Update secondary superblocks now the physical grow has completed */
308	error = xfs_update_secondary_sbs(mp);
309
310out_error:
311	/*
312	 * Increment the generation unconditionally, the error could be from
313	 * updating the secondary superblocks, in which case the new size
314	 * is live already.
315	 */
316	mp->m_generation++;
317	mutex_unlock(&mp->m_growlock);
318	return error;
319}
320
321int
322xfs_growfs_log(
323	xfs_mount_t		*mp,
324	struct xfs_growfs_log	*in)
325{
326	int error;
327
328	if (!capable(CAP_SYS_ADMIN))
329		return -EPERM;
330	if (!mutex_trylock(&mp->m_growlock))
331		return -EWOULDBLOCK;
332	error = xfs_growfs_log_private(mp, in);
333	mutex_unlock(&mp->m_growlock);
334	return error;
335}
336
337/*
338 * exported through ioctl XFS_IOC_FSCOUNTS
339 */
340
341void
342xfs_fs_counts(
343	xfs_mount_t		*mp,
344	xfs_fsop_counts_t	*cnt)
345{
346	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
347	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
348	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
349						xfs_fdblocks_unavailable(mp);
350	cnt->freertx = percpu_counter_read_positive(&mp->m_frextents);
351}
352
353/*
354 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
355 *
356 * xfs_reserve_blocks is called to set m_resblks
357 * in the in-core mount table. The number of unused reserved blocks
358 * is kept in m_resblks_avail.
359 *
360 * Reserve the requested number of blocks if available. Otherwise return
361 * as many as possible to satisfy the request. The actual number
362 * reserved are returned in outval
363 *
364 * A null inval pointer indicates that only the current reserved blocks
365 * available  should  be returned no settings are changed.
366 */
367
368int
369xfs_reserve_blocks(
370	xfs_mount_t             *mp,
371	uint64_t              *inval,
372	xfs_fsop_resblks_t      *outval)
373{
374	int64_t			lcounter, delta;
375	int64_t			fdblks_delta = 0;
376	uint64_t		request;
377	int64_t			free;
378	int			error = 0;
379
380	/* If inval is null, report current values and return */
381	if (inval == (uint64_t *)NULL) {
382		if (!outval)
383			return -EINVAL;
384		outval->resblks = mp->m_resblks;
385		outval->resblks_avail = mp->m_resblks_avail;
386		return 0;
387	}
388
389	request = *inval;
390
391	/*
392	 * With per-cpu counters, this becomes an interesting problem. we need
393	 * to work out if we are freeing or allocation blocks first, then we can
394	 * do the modification as necessary.
395	 *
396	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
397	 * hold out any changes while we work out what to do. This means that
398	 * the amount of free space can change while we do this, so we need to
399	 * retry if we end up trying to reserve more space than is available.
400	 */
401	spin_lock(&mp->m_sb_lock);
402
403	/*
404	 * If our previous reservation was larger than the current value,
405	 * then move any unused blocks back to the free pool. Modify the resblks
406	 * counters directly since we shouldn't have any problems unreserving
407	 * space.
408	 */
409	if (mp->m_resblks > request) {
410		lcounter = mp->m_resblks_avail - request;
411		if (lcounter  > 0) {		/* release unused blocks */
412			fdblks_delta = lcounter;
413			mp->m_resblks_avail -= lcounter;
414		}
415		mp->m_resblks = request;
416		if (fdblks_delta) {
417			spin_unlock(&mp->m_sb_lock);
418			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
419			spin_lock(&mp->m_sb_lock);
420		}
421
422		goto out;
423	}
424
425	/*
426	 * If the request is larger than the current reservation, reserve the
427	 * blocks before we update the reserve counters. Sample m_fdblocks and
428	 * perform a partial reservation if the request exceeds free space.
429	 *
430	 * The code below estimates how many blocks it can request from
431	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
432	 * race since fdblocks updates are not always coordinated via
433	 * m_sb_lock.  Set the reserve size even if there's not enough free
434	 * space to fill it because mod_fdblocks will refill an undersized
435	 * reserve when it can.
436	 */
437	free = percpu_counter_sum(&mp->m_fdblocks) -
438						xfs_fdblocks_unavailable(mp);
439	delta = request - mp->m_resblks;
440	mp->m_resblks = request;
441	if (delta > 0 && free > 0) {
442		/*
443		 * We'll either succeed in getting space from the free block
444		 * count or we'll get an ENOSPC.  Don't set the reserved flag
445		 * here - we don't want to reserve the extra reserve blocks
446		 * from the reserve.
447		 *
448		 * The desired reserve size can change after we drop the lock.
449		 * Use mod_fdblocks to put the space into the reserve or into
450		 * fdblocks as appropriate.
451		 */
452		fdblks_delta = min(free, delta);
453		spin_unlock(&mp->m_sb_lock);
454		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
455		if (!error)
456			xfs_mod_fdblocks(mp, fdblks_delta, 0);
457		spin_lock(&mp->m_sb_lock);
458	}
459out:
460	if (outval) {
461		outval->resblks = mp->m_resblks;
462		outval->resblks_avail = mp->m_resblks_avail;
463	}
464
465	spin_unlock(&mp->m_sb_lock);
466	return error;
467}
468
469int
470xfs_fs_goingdown(
471	xfs_mount_t	*mp,
472	uint32_t	inflags)
473{
474	switch (inflags) {
475	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
476		if (!freeze_bdev(mp->m_super->s_bdev)) {
477			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
478			thaw_bdev(mp->m_super->s_bdev);
479		}
480		break;
481	}
482	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
483		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
484		break;
485	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
486		xfs_force_shutdown(mp,
487				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
488		break;
489	default:
490		return -EINVAL;
491	}
492
493	return 0;
494}
495
496/*
497 * Force a shutdown of the filesystem instantly while keeping the filesystem
498 * consistent. We don't do an unmount here; just shutdown the shop, make sure
499 * that absolutely nothing persistent happens to this filesystem after this
500 * point.
501 *
502 * The shutdown state change is atomic, resulting in the first and only the
503 * first shutdown call processing the shutdown. This means we only shutdown the
504 * log once as it requires, and we don't spam the logs when multiple concurrent
505 * shutdowns race to set the shutdown flags.
506 */
507void
508xfs_do_force_shutdown(
509	struct xfs_mount *mp,
510	uint32_t	flags,
511	char		*fname,
512	int		lnnum)
513{
514	int		tag;
515	const char	*why;
516
517
518	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
519		xlog_shutdown_wait(mp->m_log);
520		return;
521	}
522	if (mp->m_sb_bp)
523		mp->m_sb_bp->b_flags |= XBF_DONE;
524
525	if (flags & SHUTDOWN_FORCE_UMOUNT)
526		xfs_alert(mp, "User initiated shutdown received.");
527
528	if (xlog_force_shutdown(mp->m_log, flags)) {
529		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
530		why = "Log I/O Error";
531	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
532		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
533		why = "Corruption of in-memory data";
534	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
535		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
536		why = "Corruption of on-disk metadata";
537	} else {
538		tag = XFS_PTAG_SHUTDOWN_IOERROR;
539		why = "Metadata I/O Error";
540	}
541
542	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);
543
544	xfs_alert_tag(mp, tag,
545"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
546			why, flags, __return_address, fname, lnnum);
547	xfs_alert(mp,
548		"Please unmount the filesystem and rectify the problem(s)");
549	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
550		xfs_stack_trace();
551}
552
553/*
554 * Reserve free space for per-AG metadata.
555 */
556int
557xfs_fs_reserve_ag_blocks(
558	struct xfs_mount	*mp)
559{
560	xfs_agnumber_t		agno;
561	struct xfs_perag	*pag;
562	int			error = 0;
563	int			err2;
564
565	mp->m_finobt_nores = false;
566	for_each_perag(mp, agno, pag) {
567		err2 = xfs_ag_resv_init(pag, NULL);
568		if (err2 && !error)
569			error = err2;
570	}
571
572	if (error && error != -ENOSPC) {
573		xfs_warn(mp,
574	"Error %d reserving per-AG metadata reserve pool.", error);
575		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
576	}
577
578	return error;
579}
580
581/*
582 * Free space reserved for per-AG metadata.
583 */
584int
585xfs_fs_unreserve_ag_blocks(
586	struct xfs_mount	*mp)
587{
588	xfs_agnumber_t		agno;
589	struct xfs_perag	*pag;
590	int			error = 0;
591	int			err2;
592
593	for_each_perag(mp, agno, pag) {
594		err2 = xfs_ag_resv_free(pag);
595		if (err2 && !error)
596			error = err2;
597	}
598
599	if (error)
600		xfs_warn(mp,
601	"Error %d freeing per-AG metadata reserve pool.", error);
602
603	return error;
604}