v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * All Rights Reserved.
  5 */
  6#include "xfs.h"
  7#include "xfs_fs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_sb.h"
 13#include "xfs_mount.h"
 14#include "xfs_trans.h"
 15#include "xfs_error.h"
 16#include "xfs_alloc.h"
 17#include "xfs_fsops.h"
 18#include "xfs_trans_space.h"
 19#include "xfs_log.h"
 20#include "xfs_log_priv.h"
 21#include "xfs_ag.h"
 22#include "xfs_ag_resv.h"
 23#include "xfs_trace.h"
 24
 25/*
 26 * Write new AG headers to disk. Non-transactional, but need to be
 27 * written and completed prior to the growfs transaction being logged.
 28 * To do this, we use a delayed write buffer list and wait for
 29 * submission and IO completion of the list as a whole. This allows the
 30 * IO subsystem to merge all the AG headers in a single AG into a single
 31 * IO and hide most of the latency of the IO from us.
 32 *
 33 * This also means that if we get an error whilst building the buffer
 34 * list to write, we can cancel the entire list without having written
 35 * anything.
 36 */
 37static int
 38xfs_resizefs_init_new_ags(
 39	struct xfs_trans	*tp,
 40	struct aghdr_init_data	*id,
 41	xfs_agnumber_t		oagcount,
 42	xfs_agnumber_t		nagcount,
 43	xfs_rfsblock_t		delta,
 44	struct xfs_perag	*last_pag,
 45	bool			*lastag_extended)
 46{
 47	struct xfs_mount	*mp = tp->t_mountp;
 48	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
 49	int			error;
 50
 51	*lastag_extended = false;
 52
 53	INIT_LIST_HEAD(&id->buffer_list);
 54	for (id->agno = nagcount - 1;
 55	     id->agno >= oagcount;
 56	     id->agno--, delta -= id->agsize) {
 57
 58		if (id->agno == nagcount - 1)
 59			id->agsize = nb - (id->agno *
 60					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
 61		else
 62			id->agsize = mp->m_sb.sb_agblocks;
 63
 64		error = xfs_ag_init_headers(mp, id);
 65		if (error) {
 66			xfs_buf_delwri_cancel(&id->buffer_list);
 67			return error;
 68		}
 69	}
 70
 71	error = xfs_buf_delwri_submit(&id->buffer_list);
 72	if (error)
 73		return error;
 74
 75	if (delta) {
 76		*lastag_extended = true;
 77		error = xfs_ag_extend_space(last_pag, tp, delta);
 78	}
 79	return error;
 80}
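The comment above xfs_resizefs_init_new_ags describes the delayed-write ("delwri") buffer list trick: queue every new AG header buffer on a private list, then submit the list once so the block layer can merge the writes. A minimal sketch of that pattern in isolation, assuming the caller already holds locked, fully initialized buffers (illustrative only; the helper name is made up and the function is not part of the file above):

/* Sketch: batch several header buffers into one delayed-write submission. */
static int example_write_header_batch(struct xfs_buf **bps, int nr)
{
	LIST_HEAD(buffer_list);		/* private delwri list */
	int i;

	for (i = 0; i < nr; i++) {
		/* ... fill bps[i] with the on-disk header contents ... */
		xfs_buf_delwri_queue(bps[i], &buffer_list);	/* defer the write */
		xfs_buf_relse(bps[i]);
	}

	/*
	 * Had anything failed while building the list, calling
	 * xfs_buf_delwri_cancel(&buffer_list) would discard it with nothing
	 * written.  A single submit writes and waits for the whole batch.
	 */
	return xfs_buf_delwri_submit(&buffer_list);
}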
 81
 82/*
 83 * growfs operations
 84 */
 85static int
 86xfs_growfs_data_private(
 87	struct xfs_mount	*mp,		/* mount point for filesystem */
 88	struct xfs_growfs_data	*in)		/* growfs data input struct */
 89{
 90	struct xfs_buf		*bp;
 91	int			error;
 92	xfs_agnumber_t		nagcount;
 93	xfs_agnumber_t		nagimax = 0;
 94	xfs_rfsblock_t		nb, nb_div, nb_mod;
 95	int64_t			delta;
 96	bool			lastag_extended = false;
 97	xfs_agnumber_t		oagcount;
 98	struct xfs_trans	*tp;
 99	struct aghdr_init_data	id = {};
100	struct xfs_perag	*last_pag;
101
102	nb = in->newblocks;
103	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
104	if (error)
105		return error;
106
107	if (nb > mp->m_sb.sb_dblocks) {
108		error = xfs_buf_read_uncached(mp->m_ddev_targp,
109				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
110				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
111		if (error)
112			return error;
113		xfs_buf_relse(bp);
114	}
115
116	nb_div = nb;
117	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
118	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
119		nb_div++;
120	else if (nb_mod)
121		nb = nb_div * mp->m_sb.sb_agblocks;
122
123	if (nb_div > XFS_MAX_AGNUMBER + 1) {
124		nb_div = XFS_MAX_AGNUMBER + 1;
125		nb = nb_div * mp->m_sb.sb_agblocks;
126	}
127	nagcount = nb_div;
128	delta = nb - mp->m_sb.sb_dblocks;
129	/*
130	 * Reject filesystems with a single AG because they are not
131	 * supported, and reject a shrink operation that would cause a
132	 * filesystem to become unsupported.
133	 */
134	if (delta < 0 && nagcount < 2)
135		return -EINVAL;
136
137	/* No work to do */
138	if (delta == 0)
139		return 0;
140
141	oagcount = mp->m_sb.sb_agcount;
142	/* allocate the new per-ag structures */
143	if (nagcount > oagcount) {
144		error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
145		if (error)
146			return error;
147	} else if (nagcount < oagcount) {
148		/* TODO: shrinking the entire AGs hasn't yet completed */
149		return -EINVAL;
150	}
151
152	if (delta > 0)
153		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
154				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
155				&tp);
156	else
157		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
158				0, &tp);
159	if (error)
160		goto out_free_unused_perag;
161
162	last_pag = xfs_perag_get(mp, oagcount - 1);
163	if (delta > 0) {
164		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
165				delta, last_pag, &lastag_extended);
166	} else {
167		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
168	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
169
170		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
171	}
172	xfs_perag_put(last_pag);
173	if (error)
174		goto out_trans_cancel;
175
176	/*
177	 * Update changed superblock fields transactionally. These are not
178	 * seen by the rest of the world until the transaction commit applies
179	 * them atomically to the superblock.
180	 */
181	if (nagcount > oagcount)
182		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
183	if (delta)
184		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
185	if (id.nfree)
186		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
187
188	/*
189	 * Sync sb counters now to reflect the updated values. This is
190	 * particularly important for shrink because the write verifier
191	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
192	 */
193	if (xfs_has_lazysbcount(mp))
194		xfs_log_sb(tp);
195
196	xfs_trans_set_sync(tp);
197	error = xfs_trans_commit(tp);
198	if (error)
199		return error;
200
201	/* New allocation groups fully initialized, so update mount struct */
202	if (nagimax)
203		mp->m_maxagi = nagimax;
204	xfs_set_low_space_thresholds(mp);
205	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
206
207	if (delta > 0) {
208		/*
209		 * If we expanded the last AG, free the per-AG reservation
210		 * so we can reinitialize it with the new size.
211		 */
212		if (lastag_extended) {
213			struct xfs_perag	*pag;
214
215			pag = xfs_perag_get(mp, id.agno);
216			error = xfs_ag_resv_free(pag);
217			xfs_perag_put(pag);
218			if (error)
219				return error;
220		}
221		/*
222		 * Reserve AG metadata blocks. ENOSPC here does not mean there
223		 * was a growfs failure, just that there still isn't space for
224		 * new user data after the grow has been run.
225		 */
226		error = xfs_fs_reserve_ag_blocks(mp);
227		if (error == -ENOSPC)
228			error = 0;
229	}
230	return error;
231
232out_trans_cancel:
233	xfs_trans_cancel(tp);
234out_free_unused_perag:
235	if (nagcount > oagcount)
236		xfs_free_unused_perag_range(mp, oagcount, nagcount);
237	return error;
238}
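The nb_div/nb_mod arithmetic near the top of xfs_growfs_data_private decides whether the blocks left over after dividing the requested size by sb_agblocks become a short trailing AG or are dropped from the grow. A worked userspace illustration with made-up numbers (assumes the usual XFS_MIN_AG_BLOCKS value of 64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t agblocks = 1048576;		/* blocks per AG, from the superblock */
	uint64_t nb = 2 * agblocks + 148;	/* requested new sb_dblocks */
	uint64_t nb_div = nb / agblocks;	/* 2 whole AGs */
	uint64_t nb_mod = nb % agblocks;	/* 148 blocks left over */

	if (nb_mod && nb_mod >= 64)
		nb_div++;			/* remainder big enough: keep a short last AG */
	else if (nb_mod)
		nb = nb_div * agblocks;		/* runt remainder: round the grow down */

	/* prints agcount=3 dblocks=2097300; a 20-block remainder would give
	 * agcount=2 dblocks=2097152 instead */
	printf("agcount=%llu dblocks=%llu\n",
	       (unsigned long long)nb_div, (unsigned long long)nb);
	return 0;
}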
239
240static int
241xfs_growfs_log_private(
242	struct xfs_mount	*mp,	/* mount point for filesystem */
243	struct xfs_growfs_log	*in)	/* growfs log input struct */
244{
245	xfs_extlen_t		nb;
246
247	nb = in->newblocks;
248	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
249		return -EINVAL;
250	if (nb == mp->m_sb.sb_logblocks &&
251	    in->isint == (mp->m_sb.sb_logstart != 0))
252		return -EINVAL;
253	/*
254	 * Moving the log is hard, need new interfaces to sync
255	 * the log first, hold off all activity while moving it.
256	 * Can have shorter or longer log in the same space,
257	 * or transform internal to external log or vice versa.
258	 */
259	return -ENOSYS;
260}
261
262static int
263xfs_growfs_imaxpct(
264	struct xfs_mount	*mp,
265	__u32			imaxpct)
266{
267	struct xfs_trans	*tp;
268	int			dpct;
269	int			error;
270
271	if (imaxpct > 100)
272		return -EINVAL;
273
274	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
275			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
276	if (error)
277		return error;
278
279	dpct = imaxpct - mp->m_sb.sb_imax_pct;
280	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
281	xfs_trans_set_sync(tp);
282	return xfs_trans_commit(tp);
283}
284
285/*
286 * protected versions of growfs function acquire and release locks on the mount
287 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
288 * XFS_IOC_FSGROWFSRT
289 */
290int
291xfs_growfs_data(
292	struct xfs_mount	*mp,
293	struct xfs_growfs_data	*in)
294{
295	int			error = 0;
296
297	if (!capable(CAP_SYS_ADMIN))
298		return -EPERM;
299	if (!mutex_trylock(&mp->m_growlock))
300		return -EWOULDBLOCK;
301
302	/* update imaxpct separately to the physical grow of the filesystem */
303	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
304		error = xfs_growfs_imaxpct(mp, in->imaxpct);
305		if (error)
306			goto out_error;
307	}
308
309	if (in->newblocks != mp->m_sb.sb_dblocks) {
310		error = xfs_growfs_data_private(mp, in);
311		if (error)
312			goto out_error;
313	}
314
315	/* Post growfs calculations needed to reflect new state in operations */
316	if (mp->m_sb.sb_imax_pct) {
317		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
318		do_div(icount, 100);
319		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
320	} else
321		M_IGEO(mp)->maxicount = 0;
322
323	/* Update secondary superblocks now the physical grow has completed */
324	error = xfs_update_secondary_sbs(mp);
325
326out_error:
327	/*
328	 * Increment the generation unconditionally, the error could be from
329	 * updating the secondary superblocks, in which case the new size
330	 * is live already.
331	 */
332	mp->m_generation++;
333	mutex_unlock(&mp->m_growlock);
334	return error;
335}
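As the comment above notes, xfs_growfs_data is reached through the XFS_IOC_FSGROWFSDATA ioctl; this is essentially what xfs_growfs(8) issues. A hedged userspace sketch, assuming the xfsprogs <xfs/xfs.h> header, a path on the mounted filesystem in argv[1], and CAP_SYS_ADMIN (the block count used here is arbitrary):

#include <xfs/xfs.h>		/* struct xfs_growfs_data, XFS_IOC_FSGROWFSDATA */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	/* grow the data section to 26214400 blocks (100 GiB of 4 KiB blocks);
	 * pass the filesystem's current imaxpct to leave it untouched */
	struct xfs_growfs_data in = { .newblocks = 26214400, .imaxpct = 25 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any file or directory on the fs */
	if (fd < 0 || ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0) {
		perror("XFS_IOC_FSGROWFSDATA");
		return 1;
	}
	return 0;
}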
336
337int
338xfs_growfs_log(
339	xfs_mount_t		*mp,
340	struct xfs_growfs_log	*in)
341{
342	int error;
343
344	if (!capable(CAP_SYS_ADMIN))
345		return -EPERM;
346	if (!mutex_trylock(&mp->m_growlock))
347		return -EWOULDBLOCK;
348	error = xfs_growfs_log_private(mp, in);
349	mutex_unlock(&mp->m_growlock);
350	return error;
351}
352
353/*
354 * Reserve the requested number of blocks if available. Otherwise return
355 * as many as possible to satisfy the request. The actual number
356 * reserved are returned in outval.
357 */
358int
359xfs_reserve_blocks(
360	struct xfs_mount	*mp,
361	uint64_t		request)
362{
363	int64_t			lcounter, delta;
364	int64_t			fdblks_delta = 0;
365	int64_t			free;
366	int			error = 0;
367
368	/*
369	 * With per-cpu counters, this becomes an interesting problem. we need
 370	 * to work out if we are freeing or allocating blocks first, then we can
371	 * do the modification as necessary.
372	 *
373	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
374	 * hold out any changes while we work out what to do. This means that
375	 * the amount of free space can change while we do this, so we need to
376	 * retry if we end up trying to reserve more space than is available.
377	 */
378	spin_lock(&mp->m_sb_lock);
379
380	/*
381	 * If our previous reservation was larger than the current value,
382	 * then move any unused blocks back to the free pool. Modify the resblks
383	 * counters directly since we shouldn't have any problems unreserving
384	 * space.
385	 */
386	if (mp->m_resblks > request) {
387		lcounter = mp->m_resblks_avail - request;
388		if (lcounter  > 0) {		/* release unused blocks */
389			fdblks_delta = lcounter;
390			mp->m_resblks_avail -= lcounter;
391		}
392		mp->m_resblks = request;
393		if (fdblks_delta) {
394			spin_unlock(&mp->m_sb_lock);
395			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
396			spin_lock(&mp->m_sb_lock);
397		}
398
399		goto out;
400	}
401
402	/*
403	 * If the request is larger than the current reservation, reserve the
404	 * blocks before we update the reserve counters. Sample m_fdblocks and
405	 * perform a partial reservation if the request exceeds free space.
406	 *
407	 * The code below estimates how many blocks it can request from
408	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
409	 * race since fdblocks updates are not always coordinated via
410	 * m_sb_lock.  Set the reserve size even if there's not enough free
411	 * space to fill it because mod_fdblocks will refill an undersized
412	 * reserve when it can.
413	 */
414	free = percpu_counter_sum(&mp->m_fdblocks) -
415						xfs_fdblocks_unavailable(mp);
416	delta = request - mp->m_resblks;
417	mp->m_resblks = request;
418	if (delta > 0 && free > 0) {
419		/*
420		 * We'll either succeed in getting space from the free block
421		 * count or we'll get an ENOSPC.  Don't set the reserved flag
422		 * here - we don't want to reserve the extra reserve blocks
423		 * from the reserve.
424		 *
425		 * The desired reserve size can change after we drop the lock.
426		 * Use mod_fdblocks to put the space into the reserve or into
427		 * fdblocks as appropriate.
428		 */
429		fdblks_delta = min(free, delta);
430		spin_unlock(&mp->m_sb_lock);
431		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
432		if (!error)
433			xfs_mod_fdblocks(mp, fdblks_delta, 0);
434		spin_lock(&mp->m_sb_lock);
435	}
436out:
437	spin_unlock(&mp->m_sb_lock);
438	return error;
439}
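A worked example of the two branches above, with illustrative numbers: if m_resblks is 8192 and the new request is 1024, the first branch hands the 7168 unused blocks back to fdblocks and shrinks the pool. If instead the request grows from 1024 to 8192 while only 3000 blocks are free above xfs_fdblocks_unavailable(), delta is 7168 but fdblks_delta = min(3000, 7168) = 3000; m_resblks is still set to the full 8192, and because the follow-up xfs_mod_fdblocks() call finds the reserve undersized it steers those blocks (and later ordinary frees) into m_resblks_avail rather than back into the free pool.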
440
441int
442xfs_fs_goingdown(
443	xfs_mount_t	*mp,
444	uint32_t	inflags)
445{
446	switch (inflags) {
447	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
448		if (!bdev_freeze(mp->m_super->s_bdev)) {
449			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
450			bdev_thaw(mp->m_super->s_bdev);
451		}
452		break;
453	}
454	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
455		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
456		break;
457	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
458		xfs_force_shutdown(mp,
459				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
460		break;
461	default:
462		return -EINVAL;
463	}
464
465	return 0;
466}
467
468/*
469 * Force a shutdown of the filesystem instantly while keeping the filesystem
470 * consistent. We don't do an unmount here; just shutdown the shop, make sure
471 * that absolutely nothing persistent happens to this filesystem after this
472 * point.
473 *
474 * The shutdown state change is atomic, resulting in the first and only the
475 * first shutdown call processing the shutdown. This means we only shutdown the
476 * log once as it requires, and we don't spam the logs when multiple concurrent
477 * shutdowns race to set the shutdown flags.
478 */
479void
480xfs_do_force_shutdown(
481	struct xfs_mount *mp,
482	uint32_t	flags,
483	char		*fname,
484	int		lnnum)
485{
486	int		tag;
487	const char	*why;
488
489
490	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
491		xlog_shutdown_wait(mp->m_log);
492		return;
493	}
494	if (mp->m_sb_bp)
495		mp->m_sb_bp->b_flags |= XBF_DONE;
496
497	if (flags & SHUTDOWN_FORCE_UMOUNT)
498		xfs_alert(mp, "User initiated shutdown received.");
499
500	if (xlog_force_shutdown(mp->m_log, flags)) {
501		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
502		why = "Log I/O Error";
503	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
504		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
505		why = "Corruption of in-memory data";
506	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
507		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
508		why = "Corruption of on-disk metadata";
509	} else if (flags & SHUTDOWN_DEVICE_REMOVED) {
510		tag = XFS_PTAG_SHUTDOWN_IOERROR;
511		why = "Block device removal";
512	} else {
513		tag = XFS_PTAG_SHUTDOWN_IOERROR;
514		why = "Metadata I/O Error";
515	}
516
517	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);
518
519	xfs_alert_tag(mp, tag,
520"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
521			why, flags, __return_address, fname, lnnum);
522	xfs_alert(mp,
523		"Please unmount the filesystem and rectify the problem(s)");
524	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
525		xfs_stack_trace();
526}
527
528/*
529 * Reserve free space for per-AG metadata.
530 */
531int
532xfs_fs_reserve_ag_blocks(
533	struct xfs_mount	*mp)
534{
535	xfs_agnumber_t		agno;
536	struct xfs_perag	*pag;
537	int			error = 0;
538	int			err2;
539
540	mp->m_finobt_nores = false;
541	for_each_perag(mp, agno, pag) {
542		err2 = xfs_ag_resv_init(pag, NULL);
543		if (err2 && !error)
544			error = err2;
545	}
546
547	if (error && error != -ENOSPC) {
548		xfs_warn(mp,
549	"Error %d reserving per-AG metadata reserve pool.", error);
550		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
551	}
552
553	return error;
554}
555
556/*
557 * Free space reserved for per-AG metadata.
558 */
559int
560xfs_fs_unreserve_ag_blocks(
561	struct xfs_mount	*mp)
562{
563	xfs_agnumber_t		agno;
564	struct xfs_perag	*pag;
565	int			error = 0;
566	int			err2;
567
568	for_each_perag(mp, agno, pag) {
569		err2 = xfs_ag_resv_free(pag);
570		if (err2 && !error)
571			error = err2;
572	}
573
574	if (error)
575		xfs_warn(mp,
576	"Error %d freeing per-AG metadata reserve pool.", error);
577
578	return error;
579}
v3.15
  1/*
  2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_shared.h"
 21#include "xfs_format.h"
 22#include "xfs_log_format.h"
 23#include "xfs_trans_resv.h"
 24#include "xfs_sb.h"
 25#include "xfs_ag.h"
 26#include "xfs_mount.h"
 27#include "xfs_inode.h"
 28#include "xfs_trans.h"
 29#include "xfs_inode_item.h"
 30#include "xfs_error.h"
 31#include "xfs_btree.h"
 32#include "xfs_alloc_btree.h"
 33#include "xfs_alloc.h"
 34#include "xfs_ialloc.h"
 35#include "xfs_fsops.h"
 36#include "xfs_itable.h"
 37#include "xfs_trans_space.h"
 38#include "xfs_rtalloc.h"
 39#include "xfs_trace.h"
 40#include "xfs_log.h"
 41#include "xfs_dinode.h"
 42#include "xfs_filestream.h"
 43
 44/*
 45 * File system operations
 46 */
 47
 48int
 49xfs_fs_geometry(
 50	xfs_mount_t		*mp,
 51	xfs_fsop_geom_t		*geo,
 52	int			new_version)
 53{
 54
 55	memset(geo, 0, sizeof(*geo));
 56
 57	geo->blocksize = mp->m_sb.sb_blocksize;
 58	geo->rtextsize = mp->m_sb.sb_rextsize;
 59	geo->agblocks = mp->m_sb.sb_agblocks;
 60	geo->agcount = mp->m_sb.sb_agcount;
 61	geo->logblocks = mp->m_sb.sb_logblocks;
 62	geo->sectsize = mp->m_sb.sb_sectsize;
 63	geo->inodesize = mp->m_sb.sb_inodesize;
 64	geo->imaxpct = mp->m_sb.sb_imax_pct;
 65	geo->datablocks = mp->m_sb.sb_dblocks;
 66	geo->rtblocks = mp->m_sb.sb_rblocks;
 67	geo->rtextents = mp->m_sb.sb_rextents;
 68	geo->logstart = mp->m_sb.sb_logstart;
 69	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
 70	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
 71	if (new_version >= 2) {
 72		geo->sunit = mp->m_sb.sb_unit;
 73		geo->swidth = mp->m_sb.sb_width;
 74	}
 75	if (new_version >= 3) {
 76		geo->version = XFS_FSOP_GEOM_VERSION;
 77		geo->flags =
 78			(xfs_sb_version_hasattr(&mp->m_sb) ?
 79				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
 80			(xfs_sb_version_hasnlink(&mp->m_sb) ?
 81				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
 82			(xfs_sb_version_hasquota(&mp->m_sb) ?
 83				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
 84			(xfs_sb_version_hasalign(&mp->m_sb) ?
 85				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
 86			(xfs_sb_version_hasdalign(&mp->m_sb) ?
 87				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
 88			(xfs_sb_version_hasshared(&mp->m_sb) ?
 89				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
 90			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
 91				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
 92			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
 93				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
 94			(xfs_sb_version_hassector(&mp->m_sb) ?
 95				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
 96			(xfs_sb_version_hasasciici(&mp->m_sb) ?
 97				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
 98			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
 99				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
100			(xfs_sb_version_hasattr2(&mp->m_sb) ?
101				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
102			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
103				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
104			(xfs_sb_version_hascrc(&mp->m_sb) ?
105				XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
106			(xfs_sb_version_hasftype(&mp->m_sb) ?
107				XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
108		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
109				mp->m_sb.sb_logsectsize : BBSIZE;
110		geo->rtsectsize = mp->m_sb.sb_blocksize;
111		geo->dirblocksize = mp->m_dirblksize;
112	}
113	if (new_version >= 4) {
114		geo->flags |=
115			(xfs_sb_version_haslogv2(&mp->m_sb) ?
116				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
117		geo->logsunit = mp->m_sb.sb_logsunit;
118	}
119	return 0;
120}
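xfs_fs_geometry fills the structure that the XFS_IOC_FSGEOMETRY ioctl copies back to userspace; this is how tools such as xfs_info and xfs_growfs learn the AG layout. A hedged userspace sketch, assuming the xfsprogs <xfs/xfs.h> header and a path on the filesystem in argv[1]:

#include <xfs/xfs.h>		/* struct xfs_fsop_geom, XFS_IOC_FSGEOMETRY */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct xfs_fsop_geom geo;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}
	printf("agcount=%u agblocks=%u blocksize=%u imaxpct=%u\n",
	       geo.agcount, geo.agblocks, geo.blocksize, geo.imaxpct);
	return 0;
}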
121
122static struct xfs_buf *
123xfs_growfs_get_hdr_buf(
124	struct xfs_mount	*mp,
125	xfs_daddr_t		blkno,
126	size_t			numblks,
127	int			flags,
128	const struct xfs_buf_ops *ops)
129{
130	struct xfs_buf		*bp;
131
132	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
133	if (!bp)
134		return NULL;
135
136	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
137	bp->b_bn = blkno;
138	bp->b_maps[0].bm_bn = blkno;
139	bp->b_ops = ops;
140
141	return bp;
142}
143
144static int
145xfs_growfs_data_private(
146	xfs_mount_t		*mp,		/* mount point for filesystem */
147	xfs_growfs_data_t	*in)		/* growfs data input struct */
148{
149	xfs_agf_t		*agf;
150	struct xfs_agfl		*agfl;
151	xfs_agi_t		*agi;
152	xfs_agnumber_t		agno;
153	xfs_extlen_t		agsize;
154	xfs_extlen_t		tmpsize;
155	xfs_alloc_rec_t		*arec;
156	xfs_buf_t		*bp;
157	int			bucket;
158	int			dpct;
159	int			error, saved_error = 0;
160	xfs_agnumber_t		nagcount;
161	xfs_agnumber_t		nagimax = 0;
162	xfs_rfsblock_t		nb, nb_mod;
163	xfs_rfsblock_t		new;
164	xfs_rfsblock_t		nfree;
165	xfs_agnumber_t		oagcount;
166	int			pct;
167	xfs_trans_t		*tp;
168
169	nb = in->newblocks;
170	pct = in->imaxpct;
171	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
172		return XFS_ERROR(EINVAL);
173	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
174		return error;
175	dpct = pct - mp->m_sb.sb_imax_pct;
176	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
177				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
178				XFS_FSS_TO_BB(mp, 1), 0, NULL);
179	if (!bp)
180		return EIO;
181	if (bp->b_error) {
182		error = bp->b_error;
183		xfs_buf_relse(bp);
184		return error;
185	}
186	xfs_buf_relse(bp);
187
188	new = nb;	/* use new as a temporary here */
189	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
190	nagcount = new + (nb_mod != 0);
191	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
192		nagcount--;
193		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
194		if (nb < mp->m_sb.sb_dblocks)
195			return XFS_ERROR(EINVAL);
196	}
197	new = nb - mp->m_sb.sb_dblocks;
198	oagcount = mp->m_sb.sb_agcount;
199
200	/* allocate the new per-ag structures */
201	if (nagcount > oagcount) {
202		error = xfs_initialize_perag(mp, nagcount, &nagimax);
203		if (error)
204			return error;
205	}
206
207	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
208	tp->t_flags |= XFS_TRANS_RESERVE;
209	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
210				  XFS_GROWFS_SPACE_RES(mp), 0);
211	if (error) {
212		xfs_trans_cancel(tp, 0);
213		return error;
214	}
215
216	/*
217	 * Write new AG headers to disk. Non-transactional, but written
218	 * synchronously so they are completed prior to the growfs transaction
219	 * being logged.
220	 */
221	nfree = 0;
222	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
223		__be32	*agfl_bno;
224
225		/*
226		 * AG freespace header block
227		 */
228		bp = xfs_growfs_get_hdr_buf(mp,
229				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
230				XFS_FSS_TO_BB(mp, 1), 0,
231				&xfs_agf_buf_ops);
232		if (!bp) {
233			error = ENOMEM;
234			goto error0;
235		}
236
237		agf = XFS_BUF_TO_AGF(bp);
238		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
239		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
240		agf->agf_seqno = cpu_to_be32(agno);
241		if (agno == nagcount - 1)
242			agsize =
243				nb -
244				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
245		else
246			agsize = mp->m_sb.sb_agblocks;
247		agf->agf_length = cpu_to_be32(agsize);
248		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
249		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
250		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
251		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
252		agf->agf_flfirst = 0;
253		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
254		agf->agf_flcount = 0;
255		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
256		agf->agf_freeblks = cpu_to_be32(tmpsize);
257		agf->agf_longest = cpu_to_be32(tmpsize);
258		if (xfs_sb_version_hascrc(&mp->m_sb))
259			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);
260
261		error = xfs_bwrite(bp);
262		xfs_buf_relse(bp);
263		if (error)
264			goto error0;
265
266		/*
267		 * AG freelist header block
268		 */
269		bp = xfs_growfs_get_hdr_buf(mp,
270				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
271				XFS_FSS_TO_BB(mp, 1), 0,
272				&xfs_agfl_buf_ops);
273		if (!bp) {
274			error = ENOMEM;
275			goto error0;
276		}
277
278		agfl = XFS_BUF_TO_AGFL(bp);
279		if (xfs_sb_version_hascrc(&mp->m_sb)) {
280			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
281			agfl->agfl_seqno = cpu_to_be32(agno);
282			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
283		}
284
285		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
286		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
287			agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
288
289		error = xfs_bwrite(bp);
290		xfs_buf_relse(bp);
291		if (error)
292			goto error0;
293
294		/*
295		 * AG inode header block
296		 */
297		bp = xfs_growfs_get_hdr_buf(mp,
298				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
299				XFS_FSS_TO_BB(mp, 1), 0,
300				&xfs_agi_buf_ops);
301		if (!bp) {
302			error = ENOMEM;
303			goto error0;
304		}
305
306		agi = XFS_BUF_TO_AGI(bp);
307		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
308		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
309		agi->agi_seqno = cpu_to_be32(agno);
310		agi->agi_length = cpu_to_be32(agsize);
311		agi->agi_count = 0;
312		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
313		agi->agi_level = cpu_to_be32(1);
314		agi->agi_freecount = 0;
315		agi->agi_newino = cpu_to_be32(NULLAGINO);
316		agi->agi_dirino = cpu_to_be32(NULLAGINO);
317		if (xfs_sb_version_hascrc(&mp->m_sb))
318			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
319		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
320			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
321
322		error = xfs_bwrite(bp);
323		xfs_buf_relse(bp);
324		if (error)
325			goto error0;
326
327		/*
328		 * BNO btree root block
329		 */
330		bp = xfs_growfs_get_hdr_buf(mp,
331				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
332				BTOBB(mp->m_sb.sb_blocksize), 0,
333				&xfs_allocbt_buf_ops);
334
335		if (!bp) {
336			error = ENOMEM;
337			goto error0;
338		}
339
340		if (xfs_sb_version_hascrc(&mp->m_sb))
341			xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
342						agno, XFS_BTREE_CRC_BLOCKS);
343		else
344			xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
345						agno, 0);
346
347		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
348		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
349		arec->ar_blockcount = cpu_to_be32(
350			agsize - be32_to_cpu(arec->ar_startblock));
351
352		error = xfs_bwrite(bp);
353		xfs_buf_relse(bp);
354		if (error)
355			goto error0;
356
357		/*
358		 * CNT btree root block
359		 */
360		bp = xfs_growfs_get_hdr_buf(mp,
361				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
362				BTOBB(mp->m_sb.sb_blocksize), 0,
363				&xfs_allocbt_buf_ops);
364		if (!bp) {
365			error = ENOMEM;
366			goto error0;
367		}
368
369		if (xfs_sb_version_hascrc(&mp->m_sb))
370			xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
371						agno, XFS_BTREE_CRC_BLOCKS);
372		else
373			xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
374						agno, 0);
375
376		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
377		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
378		arec->ar_blockcount = cpu_to_be32(
379			agsize - be32_to_cpu(arec->ar_startblock));
380		nfree += be32_to_cpu(arec->ar_blockcount);
381
382		error = xfs_bwrite(bp);
383		xfs_buf_relse(bp);
384		if (error)
385			goto error0;
386
387		/*
388		 * INO btree root block
389		 */
390		bp = xfs_growfs_get_hdr_buf(mp,
391				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
392				BTOBB(mp->m_sb.sb_blocksize), 0,
393				&xfs_inobt_buf_ops);
394		if (!bp) {
395			error = ENOMEM;
396			goto error0;
397		}
398
399		if (xfs_sb_version_hascrc(&mp->m_sb))
400			xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
401						agno, XFS_BTREE_CRC_BLOCKS);
402		else
403			xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
404						agno, 0);
405
406		error = xfs_bwrite(bp);
407		xfs_buf_relse(bp);
408		if (error)
409			goto error0;
410	}
411	xfs_trans_agblocks_delta(tp, nfree);
412	/*
413	 * There are new blocks in the old last a.g.
414	 */
415	if (new) {
416		/*
417		 * Change the agi length.
418		 */
419		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
420		if (error) {
421			goto error0;
422		}
423		ASSERT(bp);
424		agi = XFS_BUF_TO_AGI(bp);
425		be32_add_cpu(&agi->agi_length, new);
426		ASSERT(nagcount == oagcount ||
427		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
428		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
429		/*
430		 * Change agf length.
431		 */
432		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
433		if (error) {
434			goto error0;
435		}
436		ASSERT(bp);
437		agf = XFS_BUF_TO_AGF(bp);
438		be32_add_cpu(&agf->agf_length, new);
439		ASSERT(be32_to_cpu(agf->agf_length) ==
440		       be32_to_cpu(agi->agi_length));
441
442		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
443		/*
444		 * Free the new space.
445		 */
446		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
447			be32_to_cpu(agf->agf_length) - new), new);
448		if (error) {
449			goto error0;
450		}
451	}
452
453	/*
454	 * Update changed superblock fields transactionally. These are not
455	 * seen by the rest of the world until the transaction commit applies
456	 * them atomically to the superblock.
457	 */
458	if (nagcount > oagcount)
459		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
460	if (nb > mp->m_sb.sb_dblocks)
461		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
462				 nb - mp->m_sb.sb_dblocks);
463	if (nfree)
464		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
465	if (dpct)
466		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
467	error = xfs_trans_commit(tp, 0);
468	if (error)
469		return error;
470
471	/* New allocation groups fully initialized, so update mount struct */
472	if (nagimax)
473		mp->m_maxagi = nagimax;
474	if (mp->m_sb.sb_imax_pct) {
475		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
476		do_div(icount, 100);
477		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
478	} else
479		mp->m_maxicount = 0;
480	xfs_set_low_space_thresholds(mp);
481
482	/* update secondary superblocks. */
483	for (agno = 1; agno < nagcount; agno++) {
484		error = 0;
485		/*
486		 * new secondary superblocks need to be zeroed, not read from
487		 * disk as the contents of the new area we are growing into is
488		 * completely unknown.
489		 */
490		if (agno < oagcount) {
491			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
492				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
493				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
494				  &xfs_sb_buf_ops);
495		} else {
496			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
497				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
498				  XFS_FSS_TO_BB(mp, 1), 0);
499			if (bp) {
500				bp->b_ops = &xfs_sb_buf_ops;
501				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
502			} else
503				error = ENOMEM;
504		}
505
506		/*
507		 * If we get an error reading or writing alternate superblocks,
508		 * continue.  xfs_repair chooses the "best" superblock based
509		 * on most matches; if we break early, we'll leave more
510		 * superblocks un-updated than updated, and xfs_repair may
511		 * pick them over the properly-updated primary.
512		 */
513		if (error) {
514			xfs_warn(mp,
515		"error %d reading secondary superblock for ag %d",
516				error, agno);
517			saved_error = error;
518			continue;
519		}
520		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
521
522		error = xfs_bwrite(bp);
523		xfs_buf_relse(bp);
524		if (error) {
525			xfs_warn(mp,
526		"write error %d updating secondary superblock for ag %d",
527				error, agno);
528			saved_error = error;
529			continue;
530		}
531	}
532	return saved_error ? saved_error : error;
533
534 error0:
535	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
536	return error;
537}
538
539static int
540xfs_growfs_log_private(
541	xfs_mount_t		*mp,	/* mount point for filesystem */
542	xfs_growfs_log_t	*in)	/* growfs log input struct */
543{
544	xfs_extlen_t		nb;
545
546	nb = in->newblocks;
547	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
548		return XFS_ERROR(EINVAL);
549	if (nb == mp->m_sb.sb_logblocks &&
550	    in->isint == (mp->m_sb.sb_logstart != 0))
551		return XFS_ERROR(EINVAL);
552	/*
553	 * Moving the log is hard, need new interfaces to sync
554	 * the log first, hold off all activity while moving it.
555	 * Can have shorter or longer log in the same space,
556	 * or transform internal to external log or vice versa.
557	 */
558	return XFS_ERROR(ENOSYS);
559}
560
561/*
562 * protected versions of growfs function acquire and release locks on the mount
563 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
564 * XFS_IOC_FSGROWFSRT
565 */
566
567
568int
569xfs_growfs_data(
570	xfs_mount_t		*mp,
571	xfs_growfs_data_t	*in)
572{
573	int error;
574
575	if (!capable(CAP_SYS_ADMIN))
576		return XFS_ERROR(EPERM);
577	if (!mutex_trylock(&mp->m_growlock))
578		return XFS_ERROR(EWOULDBLOCK);
579	error = xfs_growfs_data_private(mp, in);
580	mutex_unlock(&mp->m_growlock);
581	return error;
582}
583
584int
585xfs_growfs_log(
586	xfs_mount_t		*mp,
587	xfs_growfs_log_t	*in)
588{
589	int error;
590
591	if (!capable(CAP_SYS_ADMIN))
592		return XFS_ERROR(EPERM);
593	if (!mutex_trylock(&mp->m_growlock))
594		return XFS_ERROR(EWOULDBLOCK);
595	error = xfs_growfs_log_private(mp, in);
596	mutex_unlock(&mp->m_growlock);
597	return error;
598}
599
600/*
601 * exported through ioctl XFS_IOC_FSCOUNTS
602 */
603
604int
605xfs_fs_counts(
606	xfs_mount_t		*mp,
607	xfs_fsop_counts_t	*cnt)
608{
609	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
610	spin_lock(&mp->m_sb_lock);
611	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
612	cnt->freertx = mp->m_sb.sb_frextents;
613	cnt->freeino = mp->m_sb.sb_ifree;
614	cnt->allocino = mp->m_sb.sb_icount;
615	spin_unlock(&mp->m_sb_lock);
616	return 0;
617}
618
619/*
620 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
621 *
622 * xfs_reserve_blocks is called to set m_resblks
623 * in the in-core mount table. The number of unused reserved blocks
624 * is kept in m_resblks_avail.
625 *
626 * Reserve the requested number of blocks if available. Otherwise return
627 * as many as possible to satisfy the request. The actual number
628 * reserved are returned in outval
629 *
630 * A null inval pointer indicates that only the current reserved blocks
 631	 * available should be returned; no settings are changed.
632 */
633
634int
635xfs_reserve_blocks(
636	xfs_mount_t             *mp,
637	__uint64_t              *inval,
638	xfs_fsop_resblks_t      *outval)
639{
640	__int64_t		lcounter, delta, fdblks_delta;
641	__uint64_t		request;
642
643	/* If inval is null, report current values and return */
644	if (inval == (__uint64_t *)NULL) {
645		if (!outval)
646			return EINVAL;
647		outval->resblks = mp->m_resblks;
648		outval->resblks_avail = mp->m_resblks_avail;
649		return 0;
650	}
651
652	request = *inval;
653
654	/*
655	 * With per-cpu counters, this becomes an interesting
 656	 * problem. we need to work out if we are freeing or allocating
657	 * blocks first, then we can do the modification as necessary.
658	 *
659	 * We do this under the m_sb_lock so that if we are near
660	 * ENOSPC, we will hold out any changes while we work out
661	 * what to do. This means that the amount of free space can
662	 * change while we do this, so we need to retry if we end up
663	 * trying to reserve more space than is available.
664	 *
665	 * We also use the xfs_mod_incore_sb() interface so that we
666	 * don't have to care about whether per cpu counter are
667	 * enabled, disabled or even compiled in....
668	 */
669retry:
670	spin_lock(&mp->m_sb_lock);
671	xfs_icsb_sync_counters_locked(mp, 0);
672
673	/*
674	 * If our previous reservation was larger than the current value,
675	 * then move any unused blocks back to the free pool.
676	 */
677	fdblks_delta = 0;
678	if (mp->m_resblks > request) {
679		lcounter = mp->m_resblks_avail - request;
680		if (lcounter  > 0) {		/* release unused blocks */
681			fdblks_delta = lcounter;
682			mp->m_resblks_avail -= lcounter;
683		}
684		mp->m_resblks = request;
685	} else {
686		__int64_t	free;
687
688		free =  mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
689		if (!free)
690			goto out; /* ENOSPC and fdblks_delta = 0 */
691
692		delta = request - mp->m_resblks;
693		lcounter = free - delta;
694		if (lcounter < 0) {
695			/* We can't satisfy the request, just get what we can */
696			mp->m_resblks += free;
697			mp->m_resblks_avail += free;
698			fdblks_delta = -free;
699		} else {
700			fdblks_delta = -delta;
701			mp->m_resblks = request;
702			mp->m_resblks_avail += delta;
703		}
704	}
705out:
706	if (outval) {
707		outval->resblks = mp->m_resblks;
708		outval->resblks_avail = mp->m_resblks_avail;
709	}
710	spin_unlock(&mp->m_sb_lock);
711
712	if (fdblks_delta) {
713		/*
714		 * If we are putting blocks back here, m_resblks_avail is
715		 * already at its max so this will put it in the free pool.
716		 *
717		 * If we need space, we'll either succeed in getting it
718		 * from the free block count or we'll get an enospc. If
719		 * we get a ENOSPC, it means things changed while we were
720		 * calculating fdblks_delta and so we should try again to
721		 * see if there is anything left to reserve.
722		 *
723		 * Don't set the reserved flag here - we don't want to reserve
724		 * the extra reserve blocks from the reserve.....
725		 */
726		int error;
727		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
728						 fdblks_delta, 0);
729		if (error == ENOSPC)
730			goto retry;
731	}
732	return 0;
733}
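The comment above this older xfs_reserve_blocks spells out the inval/outval contract exposed through the XFS_IOC_GET_RESBLKS and XFS_IOC_SET_RESBLKS ioctls: a get only reports the pool, a set passes the requested size in and receives the resulting counts back. A hedged userspace sketch of both calls, assuming the xfsprogs <xfs/xfs.h> header (the set call needs CAP_SYS_ADMIN and the 16384-block request is arbitrary):

#include <xfs/xfs.h>		/* struct xfs_fsop_resblks, XFS_IOC_{GET,SET}_RESBLKS */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct xfs_fsop_resblks res = { 0 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, XFS_IOC_GET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_GET_RESBLKS");
		return 1;
	}
	printf("reserved=%llu available=%llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);

	/* request a 16384-block pool; the kernel reserves what it can and
	 * writes the resulting counts back into res */
	res.resblks = 16384;
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_SET_RESBLKS");
		return 1;
	}
	printf("now reserved=%llu available=%llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);
	return 0;
}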
734
735/*
736 * Dump a transaction into the log that contains no real change. This is needed
737 * to be able to make the log dirty or stamp the current tail LSN into the log
738 * during the covering operation.
739 *
740 * We cannot use an inode here for this - that will push dirty state back up
741 * into the VFS and then periodic inode flushing will prevent log covering from
742 * making progress. Hence we log a field in the superblock instead and use a
743 * synchronous transaction to ensure the superblock is immediately unpinned
744 * and can be written back.
745 */
746int
747xfs_fs_log_dummy(
748	xfs_mount_t	*mp)
749{
750	xfs_trans_t	*tp;
751	int		error;
752
753	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
754	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
755	if (error) {
756		xfs_trans_cancel(tp, 0);
757		return error;
758	}
759
760	/* log the UUID because it is an unchanging field */
761	xfs_mod_sb(tp, XFS_SB_UUID);
762	xfs_trans_set_sync(tp);
763	return xfs_trans_commit(tp, 0);
764}
765
766int
767xfs_fs_goingdown(
768	xfs_mount_t	*mp,
769	__uint32_t	inflags)
770{
771	switch (inflags) {
772	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
773		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);
774
775		if (sb && !IS_ERR(sb)) {
776			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
777			thaw_bdev(sb->s_bdev, sb);
778		}
779
780		break;
781	}
782	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
783		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
784		break;
785	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
786		xfs_force_shutdown(mp,
787				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
788		break;
789	default:
790		return XFS_ERROR(EINVAL);
791	}
792
793	return 0;
794}
795
796/*
797 * Force a shutdown of the filesystem instantly while keeping the filesystem
798 * consistent. We don't do an unmount here; just shutdown the shop, make sure
799 * that absolutely nothing persistent happens to this filesystem after this
800 * point.
801 */
802void
803xfs_do_force_shutdown(
804	xfs_mount_t	*mp,
805	int		flags,
806	char		*fname,
807	int		lnnum)
808{
809	int		logerror;
810
811	logerror = flags & SHUTDOWN_LOG_IO_ERROR;
812
813	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
814		xfs_notice(mp,
815	"%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
816			__func__, flags, lnnum, fname, __return_address);
817	}
818	/*
819	 * No need to duplicate efforts.
820	 */
821	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
822		return;
823
824	/*
825	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
826	 * queue up anybody new on the log reservations, and wakes up
827	 * everybody who's sleeping on log reservations to tell them
828	 * the bad news.
829	 */
830	if (xfs_log_force_umount(mp, logerror))
831		return;
832
833	if (flags & SHUTDOWN_CORRUPT_INCORE) {
834		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
835    "Corruption of in-memory data detected.  Shutting down filesystem");
836		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
837			xfs_stack_trace();
838	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
839		if (logerror) {
840			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
841		"Log I/O Error Detected.  Shutting down filesystem");
842		} else if (flags & SHUTDOWN_DEVICE_REQ) {
843			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
844		"All device paths lost.  Shutting down filesystem");
845		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
846			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
847		"I/O Error Detected. Shutting down filesystem");
848		}
849	}
850	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
851		xfs_alert(mp,
852	"Please umount the filesystem and rectify the problem(s)");
853	}
854}