v3.15
  1/*
  2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_shared.h"
 21#include "xfs_format.h"
 22#include "xfs_log_format.h"
 23#include "xfs_trans_resv.h"
 24#include "xfs_sb.h"
 25#include "xfs_ag.h"
 26#include "xfs_mount.h"
 27#include "xfs_inode.h"
 28#include "xfs_trans.h"
 29#include "xfs_inode_item.h"
 30#include "xfs_error.h"
 31#include "xfs_btree.h"
 32#include "xfs_alloc_btree.h"
 33#include "xfs_alloc.h"
 34#include "xfs_ialloc.h"
 35#include "xfs_fsops.h"
 36#include "xfs_itable.h"
 37#include "xfs_trans_space.h"
 38#include "xfs_rtalloc.h"
 39#include "xfs_trace.h"
 40#include "xfs_log.h"
 41#include "xfs_dinode.h"
 42#include "xfs_filestream.h"
 43
 44/*
 45 * File system operations
 46 */
 47
 48int
 49xfs_fs_geometry(
 50	xfs_mount_t		*mp,
 51	xfs_fsop_geom_t		*geo,
 52	int			new_version)
 53{
 54
 55	memset(geo, 0, sizeof(*geo));
 56
 57	geo->blocksize = mp->m_sb.sb_blocksize;
 58	geo->rtextsize = mp->m_sb.sb_rextsize;
 59	geo->agblocks = mp->m_sb.sb_agblocks;
 60	geo->agcount = mp->m_sb.sb_agcount;
 61	geo->logblocks = mp->m_sb.sb_logblocks;
 62	geo->sectsize = mp->m_sb.sb_sectsize;
 63	geo->inodesize = mp->m_sb.sb_inodesize;
 64	geo->imaxpct = mp->m_sb.sb_imax_pct;
 65	geo->datablocks = mp->m_sb.sb_dblocks;
 66	geo->rtblocks = mp->m_sb.sb_rblocks;
 67	geo->rtextents = mp->m_sb.sb_rextents;
 68	geo->logstart = mp->m_sb.sb_logstart;
 69	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
 70	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
 71	if (new_version >= 2) {
 72		geo->sunit = mp->m_sb.sb_unit;
 73		geo->swidth = mp->m_sb.sb_width;
 74	}
 75	if (new_version >= 3) {
 76		geo->version = XFS_FSOP_GEOM_VERSION;
 77		geo->flags =
 78			(xfs_sb_version_hasattr(&mp->m_sb) ?
 79				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
 80			(xfs_sb_version_hasnlink(&mp->m_sb) ?
 81				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
 82			(xfs_sb_version_hasquota(&mp->m_sb) ?
 83				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
 84			(xfs_sb_version_hasalign(&mp->m_sb) ?
 85				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
 86			(xfs_sb_version_hasdalign(&mp->m_sb) ?
 87				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
 88			(xfs_sb_version_hasshared(&mp->m_sb) ?
 89				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
 90			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
 91				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
 92			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
 93				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
 94			(xfs_sb_version_hassector(&mp->m_sb) ?
 95				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
 96			(xfs_sb_version_hasasciici(&mp->m_sb) ?
 97				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
 98			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
 99				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
100			(xfs_sb_version_hasattr2(&mp->m_sb) ?
101				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
102			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
103				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
104			(xfs_sb_version_hascrc(&mp->m_sb) ?
105				XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
106			(xfs_sb_version_hasftype(&mp->m_sb) ?
107				XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
108		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
109				mp->m_sb.sb_logsectsize : BBSIZE;
110		geo->rtsectsize = mp->m_sb.sb_blocksize;
111		geo->dirblocksize = mp->m_dirblksize;
112	}
113	if (new_version >= 4) {
114		geo->flags |=
115			(xfs_sb_version_haslogv2(&mp->m_sb) ?
116				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
117		geo->logsunit = mp->m_sb.sb_logsunit;
118	}
119	return 0;
120}
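The structure filled in by xfs_fs_geometry() is what user space receives when it asks for filesystem geometry; new_version selects how much of the structure the caller's ioctl revision understands. A minimal, hypothetical user-space sketch of reading the same fields follows. It is not part of this file, and it assumes the xfsprogs headers (<xfs/xfs.h>) provide xfs_fsop_geom_t and the XFS_IOC_FSGEOMETRY ioctl number.

/*
 * Hypothetical user-space sketch: query the geometry that
 * xfs_fs_geometry() fills in. Build against the xfsprogs headers.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

int main(int argc, char **argv)
{
	xfs_fsop_geom_t	geo;
	int		fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any file or dir on the XFS mount */
	if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}
	printf("blocksize %u, agcount %u, agblocks %u, datablocks %llu\n",
	       geo.blocksize, geo.agcount, geo.agblocks,
	       (unsigned long long)geo.datablocks);
	close(fd);
	return 0;
}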
121
122static struct xfs_buf *
123xfs_growfs_get_hdr_buf(
124	struct xfs_mount	*mp,
125	xfs_daddr_t		blkno,
126	size_t			numblks,
127	int			flags,
128	const struct xfs_buf_ops *ops)
129{
130	struct xfs_buf		*bp;
131
132	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
133	if (!bp)
134		return NULL;
135
136	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
137	bp->b_bn = blkno;
138	bp->b_maps[0].bm_bn = blkno;
139	bp->b_ops = ops;
140
141	return bp;
142}
143
144static int
145xfs_growfs_data_private(
146	xfs_mount_t		*mp,		/* mount point for filesystem */
147	xfs_growfs_data_t	*in)		/* growfs data input struct */
148{
149	xfs_agf_t		*agf;
150	struct xfs_agfl		*agfl;
151	xfs_agi_t		*agi;
152	xfs_agnumber_t		agno;
153	xfs_extlen_t		agsize;
154	xfs_extlen_t		tmpsize;
155	xfs_alloc_rec_t		*arec;
156	xfs_buf_t		*bp;
157	int			bucket;
158	int			dpct;
159	int			error, saved_error = 0;
160	xfs_agnumber_t		nagcount;
161	xfs_agnumber_t		nagimax = 0;
162	xfs_rfsblock_t		nb, nb_mod;
163	xfs_rfsblock_t		new;
164	xfs_rfsblock_t		nfree;
165	xfs_agnumber_t		oagcount;
166	int			pct;
167	xfs_trans_t		*tp;
168
169	nb = in->newblocks;
170	pct = in->imaxpct;
171	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
172		return XFS_ERROR(EINVAL);
173	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
174		return error;
175	dpct = pct - mp->m_sb.sb_imax_pct;
176	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
177				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
178				XFS_FSS_TO_BB(mp, 1), 0, NULL);
179	if (!bp)
180		return EIO;
181	if (bp->b_error) {
182		error = bp->b_error;
183		xfs_buf_relse(bp);
184		return error;
185	}
186	xfs_buf_relse(bp);
187
188	new = nb;	/* use new as a temporary here */
189	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
190	nagcount = new + (nb_mod != 0);
191	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
192		nagcount--;
193		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
194		if (nb < mp->m_sb.sb_dblocks)
195			return XFS_ERROR(EINVAL);
196	}
197	new = nb - mp->m_sb.sb_dblocks;
198	oagcount = mp->m_sb.sb_agcount;
199
200	/* allocate the new per-ag structures */
201	if (nagcount > oagcount) {
202		error = xfs_initialize_perag(mp, nagcount, &nagimax);
203		if (error)
204			return error;
205	}
206
207	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
208	tp->t_flags |= XFS_TRANS_RESERVE;
209	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
210				  XFS_GROWFS_SPACE_RES(mp), 0);
211	if (error) {
212		xfs_trans_cancel(tp, 0);
213		return error;
214	}
215
216	/*
217	 * Write new AG headers to disk. Non-transactional, but written
218	 * synchronously so they are completed prior to the growfs transaction
219	 * being logged.
220	 */
221	nfree = 0;
222	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
223		__be32	*agfl_bno;
224
225		/*
226		 * AG freespace header block
227		 */
228		bp = xfs_growfs_get_hdr_buf(mp,
229				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
230				XFS_FSS_TO_BB(mp, 1), 0,
231				&xfs_agf_buf_ops);
232		if (!bp) {
233			error = ENOMEM;
234			goto error0;
235		}
236
237		agf = XFS_BUF_TO_AGF(bp);
238		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
239		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
240		agf->agf_seqno = cpu_to_be32(agno);
241		if (agno == nagcount - 1)
242			agsize =
243				nb -
244				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
245		else
246			agsize = mp->m_sb.sb_agblocks;
247		agf->agf_length = cpu_to_be32(agsize);
248		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
249		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
250		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
251		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
252		agf->agf_flfirst = 0;
253		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
254		agf->agf_flcount = 0;
255		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
256		agf->agf_freeblks = cpu_to_be32(tmpsize);
257		agf->agf_longest = cpu_to_be32(tmpsize);
258		if (xfs_sb_version_hascrc(&mp->m_sb))
259			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);
260
261		error = xfs_bwrite(bp);
262		xfs_buf_relse(bp);
263		if (error)
264			goto error0;
265
266		/*
267		 * AG freelist header block
268		 */
269		bp = xfs_growfs_get_hdr_buf(mp,
270				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
271				XFS_FSS_TO_BB(mp, 1), 0,
272				&xfs_agfl_buf_ops);
273		if (!bp) {
274			error = ENOMEM;
275			goto error0;
276		}
277
278		agfl = XFS_BUF_TO_AGFL(bp);
279		if (xfs_sb_version_hascrc(&mp->m_sb)) {
280			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
281			agfl->agfl_seqno = cpu_to_be32(agno);
282			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
283		}
284
285		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
286		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
287			agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
288
289		error = xfs_bwrite(bp);
290		xfs_buf_relse(bp);
291		if (error)
292			goto error0;
293
294		/*
295		 * AG inode header block
296		 */
297		bp = xfs_growfs_get_hdr_buf(mp,
298				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
299				XFS_FSS_TO_BB(mp, 1), 0,
300				&xfs_agi_buf_ops);
301		if (!bp) {
302			error = ENOMEM;
303			goto error0;
304		}
305
306		agi = XFS_BUF_TO_AGI(bp);
307		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
308		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
309		agi->agi_seqno = cpu_to_be32(agno);
310		agi->agi_length = cpu_to_be32(agsize);
311		agi->agi_count = 0;
312		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
313		agi->agi_level = cpu_to_be32(1);
314		agi->agi_freecount = 0;
315		agi->agi_newino = cpu_to_be32(NULLAGINO);
316		agi->agi_dirino = cpu_to_be32(NULLAGINO);
317		if (xfs_sb_version_hascrc(&mp->m_sb))
318			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
319		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
320			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
321
322		error = xfs_bwrite(bp);
323		xfs_buf_relse(bp);
324		if (error)
325			goto error0;
326
327		/*
328		 * BNO btree root block
329		 */
330		bp = xfs_growfs_get_hdr_buf(mp,
331				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
332				BTOBB(mp->m_sb.sb_blocksize), 0,
333				&xfs_allocbt_buf_ops);
334
335		if (!bp) {
336			error = ENOMEM;
337			goto error0;
338		}
339
340		if (xfs_sb_version_hascrc(&mp->m_sb))
341			xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
342						agno, XFS_BTREE_CRC_BLOCKS);
343		else
344			xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
345						agno, 0);
346
347		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
348		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
349		arec->ar_blockcount = cpu_to_be32(
350			agsize - be32_to_cpu(arec->ar_startblock));
351
352		error = xfs_bwrite(bp);
353		xfs_buf_relse(bp);
354		if (error)
355			goto error0;
356
357		/*
358		 * CNT btree root block
359		 */
360		bp = xfs_growfs_get_hdr_buf(mp,
361				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
362				BTOBB(mp->m_sb.sb_blocksize), 0,
363				&xfs_allocbt_buf_ops);
364		if (!bp) {
365			error = ENOMEM;
366			goto error0;
367		}
368
369		if (xfs_sb_version_hascrc(&mp->m_sb))
370			xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
371						agno, XFS_BTREE_CRC_BLOCKS);
372		else
373			xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
374						agno, 0);
375
376		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
377		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
378		arec->ar_blockcount = cpu_to_be32(
379			agsize - be32_to_cpu(arec->ar_startblock));
380		nfree += be32_to_cpu(arec->ar_blockcount);
381
382		error = xfs_bwrite(bp);
383		xfs_buf_relse(bp);
384		if (error)
385			goto error0;
386
387		/*
388		 * INO btree root block
389		 */
390		bp = xfs_growfs_get_hdr_buf(mp,
391				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
392				BTOBB(mp->m_sb.sb_blocksize), 0,
393				&xfs_inobt_buf_ops);
394		if (!bp) {
395			error = ENOMEM;
396			goto error0;
397		}
398
399		if (xfs_sb_version_hascrc(&mp->m_sb))
400			xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
401						agno, XFS_BTREE_CRC_BLOCKS);
402		else
403			xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
404						agno, 0);
405
406		error = xfs_bwrite(bp);
407		xfs_buf_relse(bp);
408		if (error)
409			goto error0;
410	}
411	xfs_trans_agblocks_delta(tp, nfree);
412	/*
413	 * There are new blocks in the old last a.g.
414	 */
415	if (new) {
416		/*
417		 * Change the agi length.
418		 */
419		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
420		if (error) {
421			goto error0;
422		}
423		ASSERT(bp);
424		agi = XFS_BUF_TO_AGI(bp);
425		be32_add_cpu(&agi->agi_length, new);
426		ASSERT(nagcount == oagcount ||
427		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
428		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
429		/*
430		 * Change agf length.
431		 */
432		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
433		if (error) {
434			goto error0;
435		}
436		ASSERT(bp);
437		agf = XFS_BUF_TO_AGF(bp);
438		be32_add_cpu(&agf->agf_length, new);
439		ASSERT(be32_to_cpu(agf->agf_length) ==
440		       be32_to_cpu(agi->agi_length));
441
442		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
443		/*
444		 * Free the new space.
445		 */
446		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
447			be32_to_cpu(agf->agf_length) - new), new);
448		if (error) {
449			goto error0;
450		}
451	}
452
453	/*
454	 * Update changed superblock fields transactionally. These are not
455	 * seen by the rest of the world until the transaction commit applies
456	 * them atomically to the superblock.
457	 */
458	if (nagcount > oagcount)
459		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
460	if (nb > mp->m_sb.sb_dblocks)
461		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
462				 nb - mp->m_sb.sb_dblocks);
463	if (nfree)
464		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
465	if (dpct)
466		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
467	error = xfs_trans_commit(tp, 0);
468	if (error)
469		return error;
470
471	/* New allocation groups fully initialized, so update mount struct */
472	if (nagimax)
473		mp->m_maxagi = nagimax;
474	if (mp->m_sb.sb_imax_pct) {
475		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
476		do_div(icount, 100);
477		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
478	} else
479		mp->m_maxicount = 0;
480	xfs_set_low_space_thresholds(mp);
481
482	/* update secondary superblocks. */
483	for (agno = 1; agno < nagcount; agno++) {
484		error = 0;
485		/*
486		 * new secondary superblocks need to be zeroed, not read from
487		 * disk as the contents of the new area we are growing into is
488		 * completely unknown.
489		 */
490		if (agno < oagcount) {
491			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
492				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
493				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
494				  &xfs_sb_buf_ops);
495		} else {
496			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
497				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
498				  XFS_FSS_TO_BB(mp, 1), 0);
499			if (bp) {
500				bp->b_ops = &xfs_sb_buf_ops;
501				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
502			} else
503				error = ENOMEM;
504		}
505
506		/*
507		 * If we get an error reading or writing alternate superblocks,
508		 * continue.  xfs_repair chooses the "best" superblock based
509		 * on most matches; if we break early, we'll leave more
510		 * superblocks un-updated than updated, and xfs_repair may
511		 * pick them over the properly-updated primary.
512		 */
513		if (error) {
514			xfs_warn(mp,
515		"error %d reading secondary superblock for ag %d",
516				error, agno);
517			saved_error = error;
518			continue;
519		}
520		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
521
522		error = xfs_bwrite(bp);
523		xfs_buf_relse(bp);
524		if (error) {
525			xfs_warn(mp,
526		"write error %d updating secondary superblock for ag %d",
527				error, agno);
528			saved_error = error;
529			continue;
530		}
531	}
532	return saved_error ? saved_error : error;
533
534 error0:
535	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
536	return error;
537}
538
539static int
540xfs_growfs_log_private(
541	xfs_mount_t		*mp,	/* mount point for filesystem */
542	xfs_growfs_log_t	*in)	/* growfs log input struct */
543{
544	xfs_extlen_t		nb;
545
546	nb = in->newblocks;
547	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
548		return XFS_ERROR(EINVAL);
549	if (nb == mp->m_sb.sb_logblocks &&
550	    in->isint == (mp->m_sb.sb_logstart != 0))
551		return XFS_ERROR(EINVAL);
552	/*
553	 * Moving the log is hard, need new interfaces to sync
554	 * the log first, hold off all activity while moving it.
555	 * Can have shorter or longer log in the same space,
556	 * or transform internal to external log or vice versa.
557	 */
558	return XFS_ERROR(ENOSYS);
559}
560
561/*
562 * protected versions of growfs function acquire and release locks on the mount
563 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
564 * XFS_IOC_FSGROWFSRT
565 */
566
567
568int
569xfs_growfs_data(
570	xfs_mount_t		*mp,
571	xfs_growfs_data_t	*in)
572{
573	int error;
574
575	if (!capable(CAP_SYS_ADMIN))
576		return XFS_ERROR(EPERM);
577	if (!mutex_trylock(&mp->m_growlock))
578		return XFS_ERROR(EWOULDBLOCK);
579	error = xfs_growfs_data_private(mp, in);
580	mutex_unlock(&mp->m_growlock);
581	return error;
582}
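xfs_growfs_data() above is the kernel side of the XFS_IOC_FSGROWFSDATA ioctl named in the preceding comment; xfs_growfs(8) is its usual caller. A minimal, hypothetical user-space sketch follows. The grow_data() helper name and the <xfs/xfs.h> header are assumptions; xfs_growfs_data_t and its newblocks/imaxpct fields come from this file, and CAP_SYS_ADMIN is required, as checked above.

/*
 * Hypothetical user-space sketch: grow the data section. newblocks is
 * the new total size in filesystem blocks, not a delta.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

int grow_data(const char *mntdir, unsigned long long newblocks, int imaxpct)
{
	xfs_growfs_data_t	in = {
		.newblocks	= newblocks,	/* new fs size in fs blocks */
		.imaxpct	= imaxpct,	/* max inode percentage */
	};
	int			fd, ret;

	fd = open(mntdir, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
	if (ret < 0)
		perror("XFS_IOC_FSGROWFSDATA");
	close(fd);
	return ret;
}

Because newblocks is an absolute size rather than a delta, xfs_growfs_data_private() in this version rejects any value smaller than the current sb_dblocks.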
583
584int
585xfs_growfs_log(
586	xfs_mount_t		*mp,
587	xfs_growfs_log_t	*in)
588{
589	int error;
590
591	if (!capable(CAP_SYS_ADMIN))
592		return XFS_ERROR(EPERM);
593	if (!mutex_trylock(&mp->m_growlock))
594		return XFS_ERROR(EWOULDBLOCK);
595	error = xfs_growfs_log_private(mp, in);
596	mutex_unlock(&mp->m_growlock);
597	return error;
598}
599
600/*
601 * exported through ioctl XFS_IOC_FSCOUNTS
602 */
603
604int
605xfs_fs_counts(
606	xfs_mount_t		*mp,
607	xfs_fsop_counts_t	*cnt)
608{
609	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
610	spin_lock(&mp->m_sb_lock);
611	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
612	cnt->freertx = mp->m_sb.sb_frextents;
613	cnt->freeino = mp->m_sb.sb_ifree;
614	cnt->allocino = mp->m_sb.sb_icount;
615	spin_unlock(&mp->m_sb_lock);
616	return 0;
617}
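xfs_fs_counts() backs the XFS_IOC_FSCOUNTS ioctl named in the comment above. Below is a hypothetical user-space sketch, assuming the xfsprogs headers; xfs_fsop_counts_t and its field names match what is filled in here, while print_counts() is just an illustrative wrapper.

/* Hypothetical user-space sketch: read the free-space counters. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

int print_counts(const char *path)
{
	xfs_fsop_counts_t	cnt;
	int			fd, ret;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, XFS_IOC_FSCOUNTS, &cnt);
	if (ret == 0)
		printf("freedata %llu freertx %llu freeino %llu allocino %llu\n",
		       (unsigned long long)cnt.freedata,
		       (unsigned long long)cnt.freertx,
		       (unsigned long long)cnt.freeino,
		       (unsigned long long)cnt.allocino);
	close(fd);
	return ret;
}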
618
619/*
620 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
621 *
622 * xfs_reserve_blocks is called to set m_resblks
623 * in the in-core mount table. The number of unused reserved blocks
624 * is kept in m_resblks_avail.
625 *
626 * Reserve the requested number of blocks if available. Otherwise return
627 * as many as possible to satisfy the request. The actual number
628 * reserved is returned in outval.
629 *
630 * A null inval pointer indicates that only the current reserved blocks
631 * available should be returned; no settings are changed.
632 */
633
634int
635xfs_reserve_blocks(
636	xfs_mount_t             *mp,
637	__uint64_t              *inval,
638	xfs_fsop_resblks_t      *outval)
639{
640	__int64_t		lcounter, delta, fdblks_delta;
641	__uint64_t		request;
642
643	/* If inval is null, report current values and return */
644	if (inval == (__uint64_t *)NULL) {
645		if (!outval)
646			return EINVAL;
647		outval->resblks = mp->m_resblks;
648		outval->resblks_avail = mp->m_resblks_avail;
649		return 0;
650	}
651
652	request = *inval;
653
654	/*
655	 * With per-cpu counters, this becomes an interesting
656	 * problem. We need to work out if we are freeing or allocating
657	 * blocks first, then we can do the modification as necessary.
658	 *
659	 * We do this under the m_sb_lock so that if we are near
660	 * ENOSPC, we will hold out any changes while we work out
661	 * what to do. This means that the amount of free space can
662	 * change while we do this, so we need to retry if we end up
663	 * trying to reserve more space than is available.
664	 *
665	 * We also use the xfs_mod_incore_sb() interface so that we
666	 * don't have to care about whether per cpu counters are
667	 * enabled, disabled or even compiled in....
668	 */
669retry:
670	spin_lock(&mp->m_sb_lock);
671	xfs_icsb_sync_counters_locked(mp, 0);
672
673	/*
674	 * If our previous reservation was larger than the current value,
675	 * then move any unused blocks back to the free pool.
676	 */
677	fdblks_delta = 0;
678	if (mp->m_resblks > request) {
679		lcounter = mp->m_resblks_avail - request;
680		if (lcounter  > 0) {		/* release unused blocks */
681			fdblks_delta = lcounter;
682			mp->m_resblks_avail -= lcounter;
683		}
684		mp->m_resblks = request;
685	} else {
686		__int64_t	free;
687
688		free =  mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
689		if (!free)
690			goto out; /* ENOSPC and fdblks_delta = 0 */
691
692		delta = request - mp->m_resblks;
693		lcounter = free - delta;
694		if (lcounter < 0) {
695			/* We can't satisfy the request, just get what we can */
696			mp->m_resblks += free;
697			mp->m_resblks_avail += free;
698			fdblks_delta = -free;
699		} else {
700			fdblks_delta = -delta;
701			mp->m_resblks = request;
702			mp->m_resblks_avail += delta;
703		}
704	}
705out:
706	if (outval) {
707		outval->resblks = mp->m_resblks;
708		outval->resblks_avail = mp->m_resblks_avail;
709	}
710	spin_unlock(&mp->m_sb_lock);
711
712	if (fdblks_delta) {
713		/*
714		 * If we are putting blocks back here, m_resblks_avail is
715		 * already at its max so this will put it in the free pool.
716		 *
717		 * If we need space, we'll either succeed in getting it
718		 * from the free block count or we'll get an enospc. If
719		 * we get an ENOSPC, it means things changed while we were
720		 * calculating fdblks_delta and so we should try again to
721		 * see if there is anything left to reserve.
722		 *
723		 * Don't set the reserved flag here - we don't want to reserve
724		 * the extra reserve blocks from the reserve.....
725		 */
726		int error;
727		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
728						 fdblks_delta, 0);
729		if (error == ENOSPC)
730			goto retry;
731	}
732	return 0;
733}
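xfs_reserve_blocks() sits behind the XFS_IOC_SET_RESBLKS and XFS_IOC_GET_RESBLKS ioctls named in the comment above. The sketch below is hypothetical user-space code: it assumes the xfsprogs headers, the set_and_report_resblks() wrapper name, and that the set ioctl passes the requested count in the resblks field of xfs_fsop_resblks_t and returns the granted values in the same structure.

/*
 * Hypothetical user-space sketch: request a block reservation, then
 * read back what was actually granted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

int set_and_report_resblks(const char *path, unsigned long long blocks)
{
	xfs_fsop_resblks_t	res = { .resblks = blocks };
	int			fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0 ||
	    ioctl(fd, XFS_IOC_GET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_SET/GET_RESBLKS");
		close(fd);
		return -1;
	}
	printf("reserved %llu, still available %llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);
	close(fd);
	return 0;
}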
734
735/*
736 * Dump a transaction into the log that contains no real change. This is needed
737 * to be able to make the log dirty or stamp the current tail LSN into the log
738 * during the covering operation.
739 *
740 * We cannot use an inode here for this - that will push dirty state back up
741 * into the VFS and then periodic inode flushing will prevent log covering from
742 * making progress. Hence we log a field in the superblock instead and use a
743 * synchronous transaction to ensure the superblock is immediately unpinned
744 * and can be written back.
745 */
746int
747xfs_fs_log_dummy(
748	xfs_mount_t	*mp)
749{
750	xfs_trans_t	*tp;
751	int		error;
752
753	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
754	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
755	if (error) {
756		xfs_trans_cancel(tp, 0);
757		return error;
758	}
759
760	/* log the UUID because it is an unchanging field */
761	xfs_mod_sb(tp, XFS_SB_UUID);
762	xfs_trans_set_sync(tp);
763	return xfs_trans_commit(tp, 0);
764}
765
766int
767xfs_fs_goingdown(
768	xfs_mount_t	*mp,
769	__uint32_t	inflags)
770{
771	switch (inflags) {
772	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
773		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);
774
775		if (sb && !IS_ERR(sb)) {
776			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
777			thaw_bdev(sb->s_bdev, sb);
778		}
779
780		break;
781	}
782	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
783		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
784		break;
785	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
786		xfs_force_shutdown(mp,
787				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
788		break;
789	default:
790		return XFS_ERROR(EINVAL);
791	}
792
793	return 0;
794}
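xfs_fs_goingdown() is reached from user space through the shutdown ioctl; the XFS_FSOP_GOING_FLAGS_* values above are what the caller passes in. The ioctl name does not appear in this file, so the sketch below assumes it is XFS_IOC_GOINGDOWN from xfs_fs.h (the call xfs_io's "shutdown" command is built on), together with the xfsprogs headers and the shut_down_fs() wrapper name.

/*
 * Hypothetical user-space sketch: force-shut down the filesystem,
 * flushing the log first.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

int shut_down_fs(const char *mntdir)
{
	uint32_t	flags = XFS_FSOP_GOING_FLAGS_LOGFLUSH;
	int		fd, ret;

	fd = open(mntdir, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, XFS_IOC_GOINGDOWN, &flags);
	if (ret < 0)
		perror("XFS_IOC_GOINGDOWN");
	close(fd);
	return ret;
}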
795
796/*
797 * Force a shutdown of the filesystem instantly while keeping the filesystem
798 * consistent. We don't do an unmount here; just shutdown the shop, make sure
799 * that absolutely nothing persistent happens to this filesystem after this
800 * point.
801 */
802void
803xfs_do_force_shutdown(
804	xfs_mount_t	*mp,
805	int		flags,
806	char		*fname,
807	int		lnnum)
808{
809	int		logerror;
810
811	logerror = flags & SHUTDOWN_LOG_IO_ERROR;
812
813	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
814		xfs_notice(mp,
815	"%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
816			__func__, flags, lnnum, fname, __return_address);
817	}
818	/*
819	 * No need to duplicate efforts.
820	 */
821	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
822		return;
823
824	/*
825	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
826	 * queue up anybody new on the log reservations, and wakes up
827	 * everybody who's sleeping on log reservations to tell them
828	 * the bad news.
829	 */
830	if (xfs_log_force_umount(mp, logerror))
831		return;
832
833	if (flags & SHUTDOWN_CORRUPT_INCORE) {
834		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
835    "Corruption of in-memory data detected.  Shutting down filesystem");
836		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
837			xfs_stack_trace();
838	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
839		if (logerror) {
840			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
841		"Log I/O Error Detected.  Shutting down filesystem");
842		} else if (flags & SHUTDOWN_DEVICE_REQ) {
843			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
844		"All device paths lost.  Shutting down filesystem");
845		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
846			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
847		"I/O Error Detected. Shutting down filesystem");
848		}
849	}
850	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
851		xfs_alert(mp,
852	"Please umount the filesystem and rectify the problem(s)");
853	}
854}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_sb.h"
 13#include "xfs_mount.h"
 14#include "xfs_trans.h"
 15#include "xfs_error.h"
 16#include "xfs_alloc.h"
 17#include "xfs_fsops.h"
 18#include "xfs_trans_space.h"
 19#include "xfs_log.h"
 20#include "xfs_ag.h"
 21#include "xfs_ag_resv.h"
 22
 23/*
 24 * Write new AG headers to disk. Non-transactional, but need to be
 25 * written and completed prior to the growfs transaction being logged.
 26 * To do this, we use a delayed write buffer list and wait for
 27 * submission and IO completion of the list as a whole. This allows the
 28 * IO subsystem to merge all the AG headers in a single AG into a single
 29 * IO and hide most of the latency of the IO from us.
 30 *
 31 * This also means that if we get an error whilst building the buffer
 32 * list to write, we can cancel the entire list without having written
 33 * anything.
 34 */
 35static int
 36xfs_resizefs_init_new_ags(
 37	struct xfs_trans	*tp,
 38	struct aghdr_init_data	*id,
 39	xfs_agnumber_t		oagcount,
 40	xfs_agnumber_t		nagcount,
 41	xfs_rfsblock_t		delta,
 42	bool			*lastag_extended)
 43{
 44	struct xfs_mount	*mp = tp->t_mountp;
 45	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
 46	int			error;
 47
 48	*lastag_extended = false;
 49
 50	INIT_LIST_HEAD(&id->buffer_list);
 51	for (id->agno = nagcount - 1;
 52	     id->agno >= oagcount;
 53	     id->agno--, delta -= id->agsize) {
 54
 55		if (id->agno == nagcount - 1)
 56			id->agsize = nb - (id->agno *
 57					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
 58		else
 59			id->agsize = mp->m_sb.sb_agblocks;
 60
 61		error = xfs_ag_init_headers(mp, id);
 62		if (error) {
 63			xfs_buf_delwri_cancel(&id->buffer_list);
 64			return error;
 65		}
 66	}
 67
 68	error = xfs_buf_delwri_submit(&id->buffer_list);
 69	if (error)
 70		return error;
 71
 72	if (delta) {
 73		*lastag_extended = true;
 74		error = xfs_ag_extend_space(mp, tp, id, delta);
 75	}
 76	return error;
 77}
 78
 79/*
 80 * growfs operations
 81 */
 82static int
 83xfs_growfs_data_private(
 84	struct xfs_mount	*mp,		/* mount point for filesystem */
 85	struct xfs_growfs_data	*in)		/* growfs data input struct */
 86{
 87	struct xfs_buf		*bp;
 88	int			error;
 89	xfs_agnumber_t		nagcount;
 90	xfs_agnumber_t		nagimax = 0;
 91	xfs_rfsblock_t		nb, nb_div, nb_mod;
 92	int64_t			delta;
 93	bool			lastag_extended;
 94	xfs_agnumber_t		oagcount;
 95	struct xfs_trans	*tp;
 96	struct aghdr_init_data	id = {};
 97
 98	nb = in->newblocks;
 99	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
100	if (error)
101		return error;
102
103	if (nb > mp->m_sb.sb_dblocks) {
104		error = xfs_buf_read_uncached(mp->m_ddev_targp,
105				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
106				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
107		if (error)
108			return error;
109		xfs_buf_relse(bp);
110	}
111
112	nb_div = nb;
113	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
114	nagcount = nb_div + (nb_mod != 0);
115	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
116		nagcount--;
117		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
118	}
119	delta = nb - mp->m_sb.sb_dblocks;
120	/*
121	 * Reject filesystems with a single AG because they are not
122	 * supported, and reject a shrink operation that would cause a
123	 * filesystem to become unsupported.
124	 */
125	if (delta < 0 && nagcount < 2)
126		return -EINVAL;
127
128	oagcount = mp->m_sb.sb_agcount;
129
130	/* allocate the new per-ag structures */
131	if (nagcount > oagcount) {
132		error = xfs_initialize_perag(mp, nagcount, &nagimax);
133		if (error)
134			return error;
135	} else if (nagcount < oagcount) {
136		/* TODO: shrinking away entire AGs is not yet supported */
137		return -EINVAL;
138	}
139
140	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
141			(delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
142			XFS_TRANS_RESERVE, &tp);
143	if (error)
144		return error;
145
146	if (delta > 0) {
147		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
148						  delta, &lastag_extended);
149	} else {
150		static struct ratelimit_state shrink_warning = \
151			RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
152		ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);
153
154		if (__ratelimit(&shrink_warning))
155			xfs_alert(mp,
156	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
157
158		error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
159	}
160	if (error)
161		goto out_trans_cancel;
162
163	/*
164	 * Update changed superblock fields transactionally. These are not
165	 * seen by the rest of the world until the transaction commit applies
166	 * them atomically to the superblock.
167	 */
168	if (nagcount > oagcount)
169		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
170	if (delta)
171		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
172	if (id.nfree)
173		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
174
175	/*
176	 * Sync sb counters now to reflect the updated values. This is
177	 * particularly important for shrink because the write verifier
178	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
179	 */
180	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
181		xfs_log_sb(tp);
182
183	xfs_trans_set_sync(tp);
184	error = xfs_trans_commit(tp);
185	if (error)
186		return error;
187
188	/* New allocation groups fully initialized, so update mount struct */
189	if (nagimax)
190		mp->m_maxagi = nagimax;
191	xfs_set_low_space_thresholds(mp);
192	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
193
194	if (delta > 0) {
195		/*
196		 * If we expanded the last AG, free the per-AG reservation
197		 * so we can reinitialize it with the new size.
198		 */
199		if (lastag_extended) {
200			struct xfs_perag	*pag;
201
202			pag = xfs_perag_get(mp, id.agno);
203			error = xfs_ag_resv_free(pag);
204			xfs_perag_put(pag);
205			if (error)
206				return error;
207		}
208		/*
209		 * Reserve AG metadata blocks. ENOSPC here does not mean there
210		 * was a growfs failure, just that there still isn't space for
211		 * new user data after the grow has been run.
212		 */
213		error = xfs_fs_reserve_ag_blocks(mp);
214		if (error == -ENOSPC)
215			error = 0;
216	}
217	return error;
218
219out_trans_cancel:
220	xfs_trans_cancel(tp);
221	return error;
222}
223
224static int
225xfs_growfs_log_private(
226	struct xfs_mount	*mp,	/* mount point for filesystem */
227	struct xfs_growfs_log	*in)	/* growfs log input struct */
228{
229	xfs_extlen_t		nb;
230
231	nb = in->newblocks;
232	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
233		return -EINVAL;
234	if (nb == mp->m_sb.sb_logblocks &&
235	    in->isint == (mp->m_sb.sb_logstart != 0))
236		return -EINVAL;
237	/*
238	 * Moving the log is hard, need new interfaces to sync
239	 * the log first, hold off all activity while moving it.
240	 * Can have shorter or longer log in the same space,
241	 * or transform internal to external log or vice versa.
242	 */
243	return -ENOSYS;
244}
245
246static int
247xfs_growfs_imaxpct(
248	struct xfs_mount	*mp,
249	__u32			imaxpct)
250{
251	struct xfs_trans	*tp;
252	int			dpct;
253	int			error;
254
255	if (imaxpct > 100)
256		return -EINVAL;
257
258	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
259			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
260	if (error)
261		return error;
262
263	dpct = imaxpct - mp->m_sb.sb_imax_pct;
264	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
265	xfs_trans_set_sync(tp);
266	return xfs_trans_commit(tp);
267}
268
269/*
270 * protected versions of growfs function acquire and release locks on the mount
271 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
272 * XFS_IOC_FSGROWFSRT
273 */
274int
275xfs_growfs_data(
276	struct xfs_mount	*mp,
277	struct xfs_growfs_data	*in)
278{
279	int			error = 0;
280
281	if (!capable(CAP_SYS_ADMIN))
282		return -EPERM;
283	if (!mutex_trylock(&mp->m_growlock))
284		return -EWOULDBLOCK;
285
286	/* update imaxpct separately from the physical grow of the filesystem */
287	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
288		error = xfs_growfs_imaxpct(mp, in->imaxpct);
289		if (error)
290			goto out_error;
291	}
292
293	if (in->newblocks != mp->m_sb.sb_dblocks) {
294		error = xfs_growfs_data_private(mp, in);
295		if (error)
296			goto out_error;
297	}
298
299	/* Post growfs calculations needed to reflect new state in operations */
300	if (mp->m_sb.sb_imax_pct) {
301		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
302		do_div(icount, 100);
303		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
304	} else
305		M_IGEO(mp)->maxicount = 0;
306
307	/* Update secondary superblocks now the physical grow has completed */
308	error = xfs_update_secondary_sbs(mp);
309
310out_error:
311	/*
312	 * Increment the generation unconditionally, the error could be from
313	 * updating the secondary superblocks, in which case the new size
314	 * is live already.
315	 */
316	mp->m_generation++;
317	mutex_unlock(&mp->m_growlock);
318	return error;
319}
320
321int
322xfs_growfs_log(
323	xfs_mount_t		*mp,
324	struct xfs_growfs_log	*in)
325{
326	int error;
327
328	if (!capable(CAP_SYS_ADMIN))
329		return -EPERM;
330	if (!mutex_trylock(&mp->m_growlock))
331		return -EWOULDBLOCK;
332	error = xfs_growfs_log_private(mp, in);
333	mutex_unlock(&mp->m_growlock);
334	return error;
335}
336
337/*
338 * exported through ioctl XFS_IOC_FSCOUNTS
339 */
340
341void
342xfs_fs_counts(
343	xfs_mount_t		*mp,
344	xfs_fsop_counts_t	*cnt)
345{
346	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
347	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
348	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
349						mp->m_alloc_set_aside;
350
351	spin_lock(&mp->m_sb_lock);
352	cnt->freertx = mp->m_sb.sb_frextents;
353	spin_unlock(&mp->m_sb_lock);
354}
355
356/*
357 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
358 *
359 * xfs_reserve_blocks is called to set m_resblks
360 * in the in-core mount table. The number of unused reserved blocks
361 * is kept in m_resblks_avail.
362 *
363 * Reserve the requested number of blocks if available. Otherwise return
364 * as many as possible to satisfy the request. The actual number
365 * reserved is returned in outval.
366 *
367 * A null inval pointer indicates that only the current reserved blocks
368 * available should be returned; no settings are changed.
369 */
370
371int
372xfs_reserve_blocks(
373	xfs_mount_t             *mp,
374	uint64_t              *inval,
375	xfs_fsop_resblks_t      *outval)
376{
377	int64_t			lcounter, delta;
378	int64_t			fdblks_delta = 0;
379	uint64_t		request;
380	int64_t			free;
381	int			error = 0;
382
383	/* If inval is null, report current values and return */
384	if (inval == (uint64_t *)NULL) {
385		if (!outval)
386			return -EINVAL;
387		outval->resblks = mp->m_resblks;
388		outval->resblks_avail = mp->m_resblks_avail;
389		return 0;
390	}
391
392	request = *inval;
393
394	/*
395	 * With per-cpu counters, this becomes an interesting problem. We need
396	 * to work out if we are freeing or allocating blocks first, then we can
397	 * do the modification as necessary.
398	 *
399	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
400	 * hold out any changes while we work out what to do. This means that
401	 * the amount of free space can change while we do this, so we need to
402	 * retry if we end up trying to reserve more space than is available.
403	 */
404	spin_lock(&mp->m_sb_lock);
405
406	/*
407	 * If our previous reservation was larger than the current value,
408	 * then move any unused blocks back to the free pool. Modify the resblks
409	 * counters directly since we shouldn't have any problems unreserving
410	 * space.
411	 */
412	if (mp->m_resblks > request) {
413		lcounter = mp->m_resblks_avail - request;
414		if (lcounter  > 0) {		/* release unused blocks */
415			fdblks_delta = lcounter;
416			mp->m_resblks_avail -= lcounter;
417		}
418		mp->m_resblks = request;
419		if (fdblks_delta) {
420			spin_unlock(&mp->m_sb_lock);
421			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
422			spin_lock(&mp->m_sb_lock);
423		}
424
425		goto out;
426	}
427
428	/*
429	 * If the request is larger than the current reservation, reserve the
430	 * blocks before we update the reserve counters. Sample m_fdblocks and
431	 * perform a partial reservation if the request exceeds free space.
432	 */
433	error = -ENOSPC;
434	do {
435		free = percpu_counter_sum(&mp->m_fdblocks) -
436						mp->m_alloc_set_aside;
437		if (free <= 0)
438			break;
439
440		delta = request - mp->m_resblks;
441		lcounter = free - delta;
442		if (lcounter < 0)
443			/* We can't satisfy the request, just get what we can */
444			fdblks_delta = free;
445		else
446			fdblks_delta = delta;
447
448		/*
449		 * We'll either succeed in getting space from the free block
450		 * count or we'll get an ENOSPC. If we get an ENOSPC, it means
451		 * things changed while we were calculating fdblks_delta and so
452		 * we should try again to see if there is anything left to
453		 * reserve.
454		 *
455		 * Don't set the reserved flag here - we don't want to reserve
456		 * the extra reserve blocks from the reserve.....
457		 */
458		spin_unlock(&mp->m_sb_lock);
459		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
460		spin_lock(&mp->m_sb_lock);
461	} while (error == -ENOSPC);
462
463	/*
464	 * Update the reserve counters if blocks have been successfully
465	 * allocated.
466	 */
467	if (!error && fdblks_delta) {
468		mp->m_resblks += fdblks_delta;
469		mp->m_resblks_avail += fdblks_delta;
470	}
471
472out:
473	if (outval) {
474		outval->resblks = mp->m_resblks;
475		outval->resblks_avail = mp->m_resblks_avail;
476	}
477
478	spin_unlock(&mp->m_sb_lock);
479	return error;
480}
481
482int
483xfs_fs_goingdown(
484	xfs_mount_t	*mp,
485	uint32_t	inflags)
486{
487	switch (inflags) {
488	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
489		if (!freeze_bdev(mp->m_super->s_bdev)) {
490			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
491			thaw_bdev(mp->m_super->s_bdev);
492		}
493		break;
494	}
495	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
496		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
497		break;
498	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
499		xfs_force_shutdown(mp,
500				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
501		break;
502	default:
503		return -EINVAL;
504	}
505
506	return 0;
507}
508
509/*
510 * Force a shutdown of the filesystem instantly while keeping the filesystem
511 * consistent. We don't do an unmount here; just shutdown the shop, make sure
512 * that absolutely nothing persistent happens to this filesystem after this
513 * point.
514 */
515void
516xfs_do_force_shutdown(
517	struct xfs_mount *mp,
518	int		flags,
519	char		*fname,
520	int		lnnum)
521{
522	bool		logerror = flags & SHUTDOWN_LOG_IO_ERROR;
523
524	/*
525	 * No need to duplicate efforts.
526	 */
527	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
528		return;
529
530	/*
531	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
532	 * queue up anybody new on the log reservations, and wakes up
533	 * everybody who's sleeping on log reservations to tell them
534	 * the bad news.
535	 */
536	if (xfs_log_force_umount(mp, logerror))
537		return;
538
539	if (flags & SHUTDOWN_FORCE_UMOUNT) {
540		xfs_alert(mp,
541"User initiated shutdown (0x%x) received. Shutting down filesystem",
542				flags);
543		return;
544	}
545
546	if (flags & SHUTDOWN_CORRUPT_INCORE) {
547		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
548"Corruption of in-memory data (0x%x) detected at %pS (%s:%d).  Shutting down filesystem",
549				flags, __return_address, fname, lnnum);
550		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
551			xfs_stack_trace();
552	} else if (logerror) {
553		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
554"Log I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
555				flags, __return_address, fname, lnnum);
556	} else {
557		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
558"I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
559				flags, __return_address, fname, lnnum);
560	}
561
562	xfs_alert(mp,
563		"Please unmount the filesystem and rectify the problem(s)");
564}
565
566/*
567 * Reserve free space for per-AG metadata.
568 */
569int
570xfs_fs_reserve_ag_blocks(
571	struct xfs_mount	*mp)
572{
573	xfs_agnumber_t		agno;
574	struct xfs_perag	*pag;
575	int			error = 0;
576	int			err2;
577
578	mp->m_finobt_nores = false;
579	for_each_perag(mp, agno, pag) {
580		err2 = xfs_ag_resv_init(pag, NULL);
581		if (err2 && !error)
582			error = err2;
583	}
584
585	if (error && error != -ENOSPC) {
586		xfs_warn(mp,
587	"Error %d reserving per-AG metadata reserve pool.", error);
588		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
589	}
590
591	return error;
592}
593
594/*
595 * Free space reserved for per-AG metadata.
596 */
597int
598xfs_fs_unreserve_ag_blocks(
599	struct xfs_mount	*mp)
600{
601	xfs_agnumber_t		agno;
602	struct xfs_perag	*pag;
603	int			error = 0;
604	int			err2;
605
606	for_each_perag(mp, agno, pag) {
607		err2 = xfs_ag_resv_free(pag);
608		if (err2 && !error)
609			error = err2;
610	}
611
612	if (error)
613		xfs_warn(mp,
614	"Error %d freeing per-AG metadata reserve pool.", error);
615
616	return error;
617}