fs/xfs/xfs_fsops.c (v5.9):
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_buf_t		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_agnumber_t		oagcount;
	xfs_trans_t		*tp;
	struct aghdr_init_data	id = {};

	nb = in->newblocks;
	if (nb < mp->m_sb.sb_dblocks)
		return -EINVAL;
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error)
		return error;
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return -EINVAL;
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Write new AG headers to disk. Non-transactional, but need to be
	 * written and completed prior to the growfs transaction being logged.
	 * To do this, we use a delayed write buffer list and wait for
	 * submission and IO completion of the list as a whole. This allows the
	 * IO subsystem to merge all the AG headers in a single AG into a single
	 * IO and hide most of the latency of the IO from us.
	 *
	 * This also means that if we get an error whilst building the buffer
	 * list to write, we can cancel the entire list without having written
	 * anything.
	 */
	INIT_LIST_HEAD(&id.buffer_list);
	for (id.agno = nagcount - 1;
	     id.agno >= oagcount;
	     id.agno--, new -= id.agsize) {

		if (id.agno == nagcount - 1)
			id.agsize = nb -
				(id.agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id.agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, &id);
		if (error) {
			xfs_buf_delwri_cancel(&id.buffer_list);
			goto out_trans_cancel;
		}
	}
	error = xfs_buf_delwri_submit(&id.buffer_list);
	if (error)
		goto out_trans_cancel;

	xfs_trans_agblocks_delta(tp, id.nfree);

	/* If there are new blocks in the old last AG, extend it. */
	if (new) {
		error = xfs_ag_extend_space(mp, tp, &id, new);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	/*
	 * If we expanded the last AG, free the per-AG reservation
	 * so we can reinitialize it with the new size.
	 */
	if (new) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, id.agno);
		error = xfs_ag_resv_free(pag);
		xfs_perag_put(pag);
		if (error)
			return error;
	}

	/*
	 * Reserve AG metadata blocks. ENOSPC here does not mean there was a
	 * growfs failure, just that there still isn't space for new user data
	 * after the grow has been run.
	 */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error == -ENOSPC)
		error = 0;
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally, the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

void
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;

	spin_lock(&mp->m_sb_lock);
	cnt->freertx = mp->m_sb.sb_frextents;
	spin_unlock(&mp->m_sb_lock);
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved are returned in outval
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available  should  be returned no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t             *mp,
	uint64_t              *inval,
	xfs_fsop_resblks_t      *outval)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	uint64_t		request;
	int64_t			free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. we need
	 * to work out if we are freeing or allocation blocks first, then we can
	 * do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter  > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 */
	error = -ENOSPC;
	do {
		free = percpu_counter_sum(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;
		if (free <= 0)
			break;

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0)
			/* We can't satisfy the request, just get what we can */
			fdblks_delta = free;
		else
			fdblks_delta = delta;

		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC. If we get a ENOSPC, it means
		 * things changed while we were calculating fdblks_delta and so
		 * we should try again to see if there is anything left to
		 * reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	} while (error == -ENOSPC);

	/*
	 * Update the reserve counters if blocks have been successfully
	 * allocated.
	 */
	if (!error && fdblks_delta) {
		mp->m_resblks += fdblks_delta;
		mp->m_resblks_avail += fdblks_delta;
	}

out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	bool		logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_FORCE_UMOUNT) {
		xfs_alert(mp,
"User initiated shutdown received. Shutting down filesystem");
		return;
	}

	xfs_notice(mp,
"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
		__func__, flags, lnnum, fname, __return_address);

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
"Corruption of in-memory data detected.  Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (logerror) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
			"Log I/O Error Detected. Shutting down filesystem");
	} else {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
			"I/O Error Detected. Shutting down filesystem");
	}

	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		err2 = xfs_ag_resv_init(pag, NULL);
		xfs_perag_put(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		err2 = xfs_ag_resv_free(pag);
		xfs_perag_put(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}
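The growfs path above is driven from userspace through the ioctls named in the comment block (XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG, XFS_IOC_FSGROWFSRT). What follows is a minimal caller sketch, not a definitive implementation: it assumes the xfsprogs development headers (<xfs/xfs.h>), which define xfs_growfs_data_t, xfs_fsop_geom_t and the ioctl numbers, and xfs_growfs(8) remains the proper tool. The struct fields mirror what the kernel reads above: newblocks is the desired data-section size in filesystem blocks, and imaxpct feeds xfs_growfs_imaxpct(). Per the checks in xfs_growfs_data(), the caller needs CAP_SYS_ADMIN, and a concurrent grow fails with EWOULDBLOCK.

/*
 * grow_data.c - illustrative sketch only, not part of xfs_fsops.c.
 * Grows the data section of a mounted XFS filesystem via
 * XFS_IOC_FSGROWFSDATA, assuming the xfsprogs headers are installed.
 *
 *   cc -o grow_data grow_data.c
 *   sudo ./grow_data /mnt/scratch 26214400
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* xfs_growfs_data_t, xfs_fsop_geom_t, XFS_IOC_* */

int main(int argc, char **argv)
{
	xfs_fsop_geom_t		geo;
	xfs_growfs_data_t	in;
	int			fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s mountpoint newblocks\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Fetch the current geometry so imaxpct can be passed back
	 * unchanged; xfs_growfs_data() only updates sb_imax_pct when
	 * the value actually differs.
	 */
	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}

	in.newblocks = strtoull(argv[2], NULL, 0);	/* new sb_dblocks */
	in.imaxpct = geo.imaxpct;

	if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0)
		perror("XFS_IOC_FSGROWFSDATA");

	close(fd);
	return 0;
}

Note how xfs_growfs_data_private() derives the new AG count: do_div() plus the nb_mod test gives a ceiling division, and a trailing runt AG smaller than XFS_MIN_AG_BLOCKS is trimmed off, so the size actually adopted can come out slightly below the requested newblocks.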
fs/xfs/xfs_fsops.c (v5.14.15):
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Write new AG headers to disk. Non-transactional, but need to be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(mp, tp, id, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended;
	xfs_agnumber_t		oagcount;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	nagcount = nb_div + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
	}
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	} else if (nagcount < oagcount) {
		/* TODO: shrinking the entire AGs hasn't yet completed */
		return -EINVAL;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			(delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
						  delta, &lastag_extended);
	} else {
		static struct ratelimit_state shrink_warning = \
			RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
		ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);

		if (__ratelimit(&shrink_warning))
			xfs_alert(mp,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
	}
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
		xfs_log_sb(tp);

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			error = xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
			if (error)
				return error;
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	struct xfs_growfs_log	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally, the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

void
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;

	spin_lock(&mp->m_sb_lock);
	cnt->freertx = mp->m_sb.sb_frextents;
	spin_unlock(&mp->m_sb_lock);
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved are returned in outval
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available  should  be returned no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t             *mp,
	uint64_t              *inval,
	xfs_fsop_resblks_t      *outval)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	uint64_t		request;
	int64_t			free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. we need
	 * to work out if we are freeing or allocation blocks first, then we can
	 * do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter  > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 */
	error = -ENOSPC;
	do {
		free = percpu_counter_sum(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;
		if (free <= 0)
			break;

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0)
			/* We can't satisfy the request, just get what we can */
			fdblks_delta = free;
		else
			fdblks_delta = delta;

		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC. If we get a ENOSPC, it means
		 * things changed while we were calculating fdblks_delta and so
		 * we should try again to see if there is anything left to
		 * reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	} while (error == -ENOSPC);

	/*
	 * Update the reserve counters if blocks have been successfully
	 * allocated.
	 */
	if (!error && fdblks_delta) {
		mp->m_resblks += fdblks_delta;
		mp->m_resblks_avail += fdblks_delta;
	}

out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		if (!freeze_bdev(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	bool		logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_FORCE_UMOUNT) {
		xfs_alert(mp,
"User initiated shutdown (0x%x) received. Shutting down filesystem",
				flags);
		return;
	}

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
"Corruption of in-memory data (0x%x) detected at %pS (%s:%d).  Shutting down filesystem",
				flags, __return_address, fname, lnnum);
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (logerror) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Log I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
				flags, __return_address, fname, lnnum);
	} else {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"I/O error (0x%x) detected at %pS (%s:%d). Shutting down filesystem",
				flags, __return_address, fname, lnnum);
	}

	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_free(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}
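xfs_reserve_blocks() is likewise reachable from userspace, via XFS_IOC_GET_RESBLKS and XFS_IOC_SET_RESBLKS on any file descriptor within the filesystem. Below is a minimal sketch under the same assumption of the xfsprogs headers; xfs_io's resblks command wraps the same pair of ioctls. As the ENOSPC retry loop above shows, a set request larger than free space is satisfied only partially, and the struct comes back filled with the values actually in effect.

/*
 * resblks.c - illustrative sketch only, not part of xfs_fsops.c.
 * Queries (and optionally sets) the XFS reserved block pool via
 * XFS_IOC_GET_RESBLKS / XFS_IOC_SET_RESBLKS.
 *
 *   cc -o resblks resblks.c
 *   sudo ./resblks /mnt/scratch [blocks]
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* xfs_fsop_resblks_t, XFS_IOC_*_RESBLKS */

int main(int argc, char **argv)
{
	xfs_fsop_resblks_t	res;
	int			fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s mountpoint [blocks]\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* GET path: xfs_reserve_blocks() with a NULL inval just reports. */
	if (ioctl(fd, XFS_IOC_GET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_GET_RESBLKS");
		return 1;
	}
	printf("reserved %llu, available %llu\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);

	if (argc > 2) {
		/*
		 * SET path: the kernel reserves as much of the request as
		 * free space allows and writes back the resulting counters.
		 * Requires CAP_SYS_ADMIN.
		 */
		res.resblks = strtoull(argv[2], NULL, 0);
		if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0) {
			perror("XFS_IOC_SET_RESBLKS");
			return 1;
		}
		printf("now reserved %llu, available %llu\n",
		       (unsigned long long)res.resblks,
		       (unsigned long long)res.resblks_avail);
	}
	close(fd);
	return 0;
}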