1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_trans.h"
15#include "xfs_error.h"
16#include "xfs_alloc.h"
17#include "xfs_fsops.h"
18#include "xfs_trans_space.h"
19#include "xfs_log.h"
20#include "xfs_ag.h"
21#include "xfs_ag_resv.h"
22
23/*
24 * growfs operations
25 */
26static int
27xfs_growfs_data_private(
28 xfs_mount_t *mp, /* mount point for filesystem */
29 xfs_growfs_data_t *in) /* growfs data input struct */
30{
31 xfs_buf_t *bp;
32 int error;
33 xfs_agnumber_t nagcount;
34 xfs_agnumber_t nagimax = 0;
35 xfs_rfsblock_t nb, nb_mod;
36 xfs_rfsblock_t new;
37 xfs_agnumber_t oagcount;
38 xfs_trans_t *tp;
39 struct aghdr_init_data id = {};
40
41 nb = in->newblocks;
42 if (nb < mp->m_sb.sb_dblocks)
43 return -EINVAL;
44 if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
45 return error;
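	/*
	 * Probe the last sector of the proposed new size with an uncached
	 * read so the grow fails up front if the underlying device is not
	 * actually that large.
	 */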
46 error = xfs_buf_read_uncached(mp->m_ddev_targp,
47 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
48 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
49 if (error)
50 return error;
51 xfs_buf_relse(bp);
52
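	/*
	 * Work out the new AG count. If there is a remainder, round up when
	 * the tail AG would be at least XFS_MIN_AG_BLOCKS long; otherwise
	 * drop the runt AG and trim the new size back to a whole number of
	 * AGs.
	 */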
53 new = nb; /* use new as a temporary here */
54 nb_mod = do_div(new, mp->m_sb.sb_agblocks);
55 nagcount = new + (nb_mod != 0);
56 if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
57 nagcount--;
58 nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
59 if (nb < mp->m_sb.sb_dblocks)
60 return -EINVAL;
61 }
62 new = nb - mp->m_sb.sb_dblocks;
63 oagcount = mp->m_sb.sb_agcount;
64
65 /* allocate the new per-ag structures */
66 if (nagcount > oagcount) {
67 error = xfs_initialize_perag(mp, nagcount, &nagimax);
68 if (error)
69 return error;
70 }
71
72 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
73 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
74 if (error)
75 return error;
76
77 /*
78 * Write new AG headers to disk. Non-transactional, but need to be
79 * written and completed prior to the growfs transaction being logged.
80 * To do this, we use a delayed write buffer list and wait for
81 * submission and IO completion of the list as a whole. This allows the
82 * IO subsystem to merge all the AG headers in a single AG into a single
83 * IO and hide most of the latency of the IO from us.
84 *
85 * This also means that if we get an error whilst building the buffer
86 * list to write, we can cancel the entire list without having written
87 * anything.
88 */
89 INIT_LIST_HEAD(&id.buffer_list);
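	/*
	 * Initialise the new AGs from the highest candidate AG downwards.
	 * Only the very last AG can be shorter than sb_agblocks; every other
	 * new AG is full size.
	 */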
90 for (id.agno = nagcount - 1;
91 id.agno >= oagcount;
92 id.agno--, new -= id.agsize) {
93
94 if (id.agno == nagcount - 1)
95 id.agsize = nb -
96 (id.agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
97 else
98 id.agsize = mp->m_sb.sb_agblocks;
99
100 error = xfs_ag_init_headers(mp, &id);
101 if (error) {
102 xfs_buf_delwri_cancel(&id.buffer_list);
103 goto out_trans_cancel;
104 }
105 }
106 error = xfs_buf_delwri_submit(&id.buffer_list);
107 if (error)
108 goto out_trans_cancel;
109
110 xfs_trans_agblocks_delta(tp, id.nfree);
111
112 /* If there are new blocks in the old last AG, extend it. */
113 if (new) {
114 error = xfs_ag_extend_space(mp, tp, &id, new);
115 if (error)
116 goto out_trans_cancel;
117 }
118
119 /*
120 * Update changed superblock fields transactionally. These are not
121 * seen by the rest of the world until the transaction commit applies
122 * them atomically to the superblock.
123 */
124 if (nagcount > oagcount)
125 xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
126 if (nb > mp->m_sb.sb_dblocks)
127 xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
128 nb - mp->m_sb.sb_dblocks);
129 if (id.nfree)
130 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
131 xfs_trans_set_sync(tp);
132 error = xfs_trans_commit(tp);
133 if (error)
134 return error;
135
136 /* New allocation groups fully initialized, so update mount struct */
137 if (nagimax)
138 mp->m_maxagi = nagimax;
139 xfs_set_low_space_thresholds(mp);
140 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
141
142 /*
143 * If we expanded the last AG, free the per-AG reservation
144 * so we can reinitialize it with the new size.
145 */
146 if (new) {
147 struct xfs_perag *pag;
148
149 pag = xfs_perag_get(mp, id.agno);
150 error = xfs_ag_resv_free(pag);
151 xfs_perag_put(pag);
152 if (error)
153 return error;
154 }
155
156 /*
157 * Reserve AG metadata blocks. ENOSPC here does not mean there was a
158 * growfs failure, just that there still isn't space for new user data
159 * after the grow has been run.
160 */
161 error = xfs_fs_reserve_ag_blocks(mp);
162 if (error == -ENOSPC)
163 error = 0;
164 return error;
165
166out_trans_cancel:
167 xfs_trans_cancel(tp);
168 return error;
169}
170
171static int
172xfs_growfs_log_private(
173 xfs_mount_t *mp, /* mount point for filesystem */
174 xfs_growfs_log_t *in) /* growfs log input struct */
175{
176 xfs_extlen_t nb;
177
178 nb = in->newblocks;
179 if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
180 return -EINVAL;
181 if (nb == mp->m_sb.sb_logblocks &&
182 in->isint == (mp->m_sb.sb_logstart != 0))
183 return -EINVAL;
184 /*
185	 * Moving the log is hard: we would need new interfaces to sync
186	 * the log first and hold off all activity while moving it.
187	 * We could have a shorter or longer log in the same space,
188	 * or transform an internal log to an external one, or vice versa.
189 */
190 return -ENOSYS;
191}
192
193static int
194xfs_growfs_imaxpct(
195 struct xfs_mount *mp,
196 __u32 imaxpct)
197{
198 struct xfs_trans *tp;
199 int dpct;
200 int error;
201
202 if (imaxpct > 100)
203 return -EINVAL;
204
205 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
206 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
207 if (error)
208 return error;
209
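	/* Log the change as a delta against the current on-disk value. */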
210 dpct = imaxpct - mp->m_sb.sb_imax_pct;
211 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
212 xfs_trans_set_sync(tp);
213 return xfs_trans_commit(tp);
214}
215
216/*
217 * Protected versions of the growfs functions acquire and release locks on the
218 * mount point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
219 * XFS_IOC_FSGROWFSRT
220 */
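/*
 * Illustrative userspace sketch (error handling omitted, new_dblocks is a
 * placeholder for the caller's chosen size in filesystem blocks): growing
 * the data section means filling in struct xfs_growfs_data and issuing
 * XFS_IOC_FSGROWFSDATA on a file descriptor within the filesystem:
 *
 *	struct xfs_growfs_data in = {
 *		.newblocks	= new_dblocks,
 *		.imaxpct	= 25,
 *	};
 *	ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
 */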
221int
222xfs_growfs_data(
223 struct xfs_mount *mp,
224 struct xfs_growfs_data *in)
225{
226 int error = 0;
227
228 if (!capable(CAP_SYS_ADMIN))
229 return -EPERM;
230 if (!mutex_trylock(&mp->m_growlock))
231 return -EWOULDBLOCK;
232
233 /* update imaxpct separately to the physical grow of the filesystem */
234 if (in->imaxpct != mp->m_sb.sb_imax_pct) {
235 error = xfs_growfs_imaxpct(mp, in->imaxpct);
236 if (error)
237 goto out_error;
238 }
239
240 if (in->newblocks != mp->m_sb.sb_dblocks) {
241 error = xfs_growfs_data_private(mp, in);
242 if (error)
243 goto out_error;
244 }
245
246	/* Post-growfs calculations needed to reflect the new state in operations */
247 if (mp->m_sb.sb_imax_pct) {
248 uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
249 do_div(icount, 100);
250 M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
251 } else
252 M_IGEO(mp)->maxicount = 0;
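	/* A maxicount of zero means inode allocation is not capped by imaxpct. */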
253
254 /* Update secondary superblocks now the physical grow has completed */
255 error = xfs_update_secondary_sbs(mp);
256
257out_error:
258 /*
259	 * Increment the generation unconditionally; the error could be from
260	 * updating the secondary superblocks, in which case the new size
261	 * is already live.
262 */
263 mp->m_generation++;
264 mutex_unlock(&mp->m_growlock);
265 return error;
266}
267
268int
269xfs_growfs_log(
270 xfs_mount_t *mp,
271 xfs_growfs_log_t *in)
272{
273 int error;
274
275 if (!capable(CAP_SYS_ADMIN))
276 return -EPERM;
277 if (!mutex_trylock(&mp->m_growlock))
278 return -EWOULDBLOCK;
279 error = xfs_growfs_log_private(mp, in);
280 mutex_unlock(&mp->m_growlock);
281 return error;
282}
283
284/*
285 * exported through ioctl XFS_IOC_FSCOUNTS
286 */
287
288void
289xfs_fs_counts(
290 xfs_mount_t *mp,
291 xfs_fsop_counts_t *cnt)
292{
293 cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
294 cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
295 cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
296 mp->m_alloc_set_aside;
297
298 spin_lock(&mp->m_sb_lock);
299 cnt->freertx = mp->m_sb.sb_frextents;
300 spin_unlock(&mp->m_sb_lock);
301}
302
303/*
304 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
305 *
306 * xfs_reserve_blocks is called to set m_resblks
307 * in the in-core mount table. The number of unused reserved blocks
308 * is kept in m_resblks_avail.
309 *
310 * Reserve the requested number of blocks if available. Otherwise return
311 * as many as possible to satisfy the request. The actual number
312 * reserved is returned in outval.
313 *
314 * A null inval pointer indicates that only the current number of reserved
315 * blocks should be reported; no settings are changed.
316 */
317
318int
319xfs_reserve_blocks(
320 xfs_mount_t *mp,
321 uint64_t *inval,
322 xfs_fsop_resblks_t *outval)
323{
324 int64_t lcounter, delta;
325 int64_t fdblks_delta = 0;
326 uint64_t request;
327 int64_t free;
328 int error = 0;
329
330 /* If inval is null, report current values and return */
331 if (inval == (uint64_t *)NULL) {
332 if (!outval)
333 return -EINVAL;
334 outval->resblks = mp->m_resblks;
335 outval->resblks_avail = mp->m_resblks_avail;
336 return 0;
337 }
338
339 request = *inval;
340
341 /*
342	 * With per-cpu counters, this becomes an interesting problem. We need
343	 * to work out if we are freeing or allocating blocks first, then we can
344 * do the modification as necessary.
345 *
346 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
347 * hold out any changes while we work out what to do. This means that
348 * the amount of free space can change while we do this, so we need to
349 * retry if we end up trying to reserve more space than is available.
350 */
351 spin_lock(&mp->m_sb_lock);
352
353 /*
354 * If our previous reservation was larger than the current value,
355 * then move any unused blocks back to the free pool. Modify the resblks
356 * counters directly since we shouldn't have any problems unreserving
357 * space.
358 */
359 if (mp->m_resblks > request) {
360 lcounter = mp->m_resblks_avail - request;
361 if (lcounter > 0) { /* release unused blocks */
362 fdblks_delta = lcounter;
363 mp->m_resblks_avail -= lcounter;
364 }
365 mp->m_resblks = request;
366 if (fdblks_delta) {
367 spin_unlock(&mp->m_sb_lock);
368 error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
369 spin_lock(&mp->m_sb_lock);
370 }
371
372 goto out;
373 }
374
375 /*
376 * If the request is larger than the current reservation, reserve the
377 * blocks before we update the reserve counters. Sample m_fdblocks and
378 * perform a partial reservation if the request exceeds free space.
379 */
380 error = -ENOSPC;
381 do {
382 free = percpu_counter_sum(&mp->m_fdblocks) -
383 mp->m_alloc_set_aside;
384 if (free <= 0)
385 break;
386
387 delta = request - mp->m_resblks;
388 lcounter = free - delta;
389 if (lcounter < 0)
390 /* We can't satisfy the request, just get what we can */
391 fdblks_delta = free;
392 else
393 fdblks_delta = delta;
394
395 /*
396 * We'll either succeed in getting space from the free block
397	 * count or we'll get an ENOSPC. If we get an ENOSPC, it means
398 * things changed while we were calculating fdblks_delta and so
399 * we should try again to see if there is anything left to
400 * reserve.
401 *
402 * Don't set the reserved flag here - we don't want to reserve
403	 * the extra reserve blocks from the reserve pool.
404 */
405 spin_unlock(&mp->m_sb_lock);
406 error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
407 spin_lock(&mp->m_sb_lock);
408 } while (error == -ENOSPC);
409
410 /*
411 * Update the reserve counters if blocks have been successfully
412 * allocated.
413 */
414 if (!error && fdblks_delta) {
415 mp->m_resblks += fdblks_delta;
416 mp->m_resblks_avail += fdblks_delta;
417 }
418
419out:
420 if (outval) {
421 outval->resblks = mp->m_resblks;
422 outval->resblks_avail = mp->m_resblks_avail;
423 }
424
425 spin_unlock(&mp->m_sb_lock);
426 return error;
427}
428
429int
430xfs_fs_goingdown(
431 xfs_mount_t *mp,
432 uint32_t inflags)
433{
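	/*
	 * DEFAULT freezes the block device to push dirty data to disk before
	 * shutting down and then thaws it; LOGFLUSH shuts down after forcing
	 * the log; NOLOGFLUSH also marks the log with an IO error so nothing
	 * further is written to it.
	 */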
434 switch (inflags) {
435 case XFS_FSOP_GOING_FLAGS_DEFAULT: {
436 struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);
437
438 if (sb && !IS_ERR(sb)) {
439 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
440 thaw_bdev(sb->s_bdev, sb);
441 }
442
443 break;
444 }
445 case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
446 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
447 break;
448 case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
449 xfs_force_shutdown(mp,
450 SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
451 break;
452 default:
453 return -EINVAL;
454 }
455
456 return 0;
457}
458
459/*
460 * Force a shutdown of the filesystem instantly while keeping the filesystem
461 * consistent. We don't do an unmount here; just shut down the shop and make sure
462 * that absolutely nothing persistent happens to this filesystem after this
463 * point.
464 */
465void
466xfs_do_force_shutdown(
467 struct xfs_mount *mp,
468 int flags,
469 char *fname,
470 int lnnum)
471{
472 bool logerror = flags & SHUTDOWN_LOG_IO_ERROR;
473
474 /*
475 * No need to duplicate efforts.
476 */
477 if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
478 return;
479
480 /*
481 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
482 * queue up anybody new on the log reservations, and wakes up
483 * everybody who's sleeping on log reservations to tell them
484 * the bad news.
485 */
486 if (xfs_log_force_umount(mp, logerror))
487 return;
488
489 if (flags & SHUTDOWN_FORCE_UMOUNT) {
490 xfs_alert(mp,
491"User initiated shutdown received. Shutting down filesystem");
492 return;
493 }
494
495 xfs_notice(mp,
496"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
497 __func__, flags, lnnum, fname, __return_address);
498
499 if (flags & SHUTDOWN_CORRUPT_INCORE) {
500 xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
501"Corruption of in-memory data detected. Shutting down filesystem");
502 if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
503 xfs_stack_trace();
504 } else if (logerror) {
505 xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
506 "Log I/O Error Detected. Shutting down filesystem");
507 } else {
508 xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
509 "I/O Error Detected. Shutting down filesystem");
510 }
511
512 xfs_alert(mp,
513 "Please unmount the filesystem and rectify the problem(s)");
514}
515
516/*
517 * Reserve free space for per-AG metadata.
518 */
519int
520xfs_fs_reserve_ag_blocks(
521 struct xfs_mount *mp)
522{
523 xfs_agnumber_t agno;
524 struct xfs_perag *pag;
525 int error = 0;
526 int err2;
527
528 mp->m_finobt_nores = false;
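	/*
	 * Initialise the reservation for every AG, carrying on past failures
	 * so each AG gets a chance; the first error seen is what we report.
	 */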
529 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
530 pag = xfs_perag_get(mp, agno);
531 err2 = xfs_ag_resv_init(pag, NULL);
532 xfs_perag_put(pag);
533 if (err2 && !error)
534 error = err2;
535 }
536
537 if (error && error != -ENOSPC) {
538 xfs_warn(mp,
539 "Error %d reserving per-AG metadata reserve pool.", error);
540 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
541 }
542
543 return error;
544}
545
546/*
547 * Free space reserved for per-AG metadata.
548 */
549int
550xfs_fs_unreserve_ag_blocks(
551 struct xfs_mount *mp)
552{
553 xfs_agnumber_t agno;
554 struct xfs_perag *pag;
555 int error = 0;
556 int err2;
557
558 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
559 pag = xfs_perag_get(mp, agno);
560 err2 = xfs_ag_resv_free(pag);
561 xfs_perag_put(pag);
562 if (err2 && !error)
563 error = err2;
564 }
565
566 if (error)
567 xfs_warn(mp,
568 "Error %d freeing per-AG metadata reserve pool.", error);
569
570 return error;
571}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_trans.h"
15#include "xfs_error.h"
16#include "xfs_alloc.h"
17#include "xfs_fsops.h"
18#include "xfs_trans_space.h"
19#include "xfs_log.h"
20#include "xfs_log_priv.h"
21#include "xfs_ag.h"
22#include "xfs_ag_resv.h"
23#include "xfs_trace.h"
24
25/*
26 * Write new AG headers to disk. Non-transactional, but need to be
27 * written and completed prior to the growfs transaction being logged.
28 * To do this, we use a delayed write buffer list and wait for
29 * submission and IO completion of the list as a whole. This allows the
30 * IO subsystem to merge all the AG headers in a single AG into a single
31 * IO and hide most of the latency of the IO from us.
32 *
33 * This also means that if we get an error whilst building the buffer
34 * list to write, we can cancel the entire list without having written
35 * anything.
36 */
37static int
38xfs_resizefs_init_new_ags(
39 struct xfs_trans *tp,
40 struct aghdr_init_data *id,
41 xfs_agnumber_t oagcount,
42 xfs_agnumber_t nagcount,
43 xfs_rfsblock_t delta,
44 struct xfs_perag *last_pag,
45 bool *lastag_extended)
46{
47 struct xfs_mount *mp = tp->t_mountp;
48 xfs_rfsblock_t nb = mp->m_sb.sb_dblocks + delta;
49 int error;
50
51 *lastag_extended = false;
52
53 INIT_LIST_HEAD(&id->buffer_list);
54 for (id->agno = nagcount - 1;
55 id->agno >= oagcount;
56 id->agno--, delta -= id->agsize) {
57
58 if (id->agno == nagcount - 1)
59 id->agsize = nb - (id->agno *
60 (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
61 else
62 id->agsize = mp->m_sb.sb_agblocks;
63
64 error = xfs_ag_init_headers(mp, id);
65 if (error) {
66 xfs_buf_delwri_cancel(&id->buffer_list);
67 return error;
68 }
69 }
70
71 error = xfs_buf_delwri_submit(&id->buffer_list);
72 if (error)
73 return error;
74
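	/*
	 * Any blocks left over after carving out the new AGs belong to the
	 * old last AG; extend it to cover them.
	 */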
75 if (delta) {
76 *lastag_extended = true;
77 error = xfs_ag_extend_space(last_pag, tp, delta);
78 }
79 return error;
80}
81
82/*
83 * growfs operations
84 */
85static int
86xfs_growfs_data_private(
87 struct xfs_mount *mp, /* mount point for filesystem */
88 struct xfs_growfs_data *in) /* growfs data input struct */
89{
90 xfs_agnumber_t oagcount = mp->m_sb.sb_agcount;
91 struct xfs_buf *bp;
92 int error;
93 xfs_agnumber_t nagcount;
94 xfs_agnumber_t nagimax = 0;
95 xfs_rfsblock_t nb, nb_div, nb_mod;
96 int64_t delta;
97 bool lastag_extended = false;
98 struct xfs_trans *tp;
99 struct aghdr_init_data id = {};
100 struct xfs_perag *last_pag;
101
102 nb = in->newblocks;
103 error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
104 if (error)
105 return error;
106
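	/*
	 * Only probe the device when growing: read the last sector of the
	 * proposed new size uncached to check the device really extends
	 * that far.
	 */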
107 if (nb > mp->m_sb.sb_dblocks) {
108 error = xfs_buf_read_uncached(mp->m_ddev_targp,
109 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
110 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
111 if (error)
112 return error;
113 xfs_buf_relse(bp);
114 }
115
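	/*
	 * Work out the new AG count: round up if the tail AG would be at
	 * least XFS_MIN_AG_BLOCKS long, otherwise trim the new size to a
	 * whole number of AGs, and never exceed XFS_MAX_AGNUMBER + 1 AGs.
	 */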
116 nb_div = nb;
117 nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
118 if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
119 nb_div++;
120 else if (nb_mod)
121 nb = nb_div * mp->m_sb.sb_agblocks;
122
123 if (nb_div > XFS_MAX_AGNUMBER + 1) {
124 nb_div = XFS_MAX_AGNUMBER + 1;
125 nb = nb_div * mp->m_sb.sb_agblocks;
126 }
127 nagcount = nb_div;
128 delta = nb - mp->m_sb.sb_dblocks;
129 /*
130 * Reject filesystems with a single AG because they are not
131 * supported, and reject a shrink operation that would cause a
132 * filesystem to become unsupported.
133 */
134 if (delta < 0 && nagcount < 2)
135 return -EINVAL;
136
137 /* No work to do */
138 if (delta == 0)
139 return 0;
140
141	/* TODO: shrinking away entire AGs is not yet supported */
142 if (nagcount < oagcount)
143 return -EINVAL;
144
145 /* allocate the new per-ag structures */
146 error = xfs_initialize_perag(mp, oagcount, nagcount, nb, &nagimax);
147 if (error)
148 return error;
149
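	/*
	 * A grow only needs a small block reservation to cover free space
	 * btree expansion in the last AG; a shrink reserves all of the
	 * blocks being removed so they come out of the free pool up front.
	 */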
150 if (delta > 0)
151 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
152 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
153 &tp);
154 else
155 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
156 0, &tp);
157 if (error)
158 goto out_free_unused_perag;
159
160 last_pag = xfs_perag_get(mp, oagcount - 1);
161 if (delta > 0) {
162 error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
163 delta, last_pag, &lastag_extended);
164 } else {
165 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_SHRINK);
166 error = xfs_ag_shrink_space(last_pag, &tp, -delta);
167 }
168 xfs_perag_put(last_pag);
169 if (error)
170 goto out_trans_cancel;
171
172 /*
173 * Update changed superblock fields transactionally. These are not
174 * seen by the rest of the world until the transaction commit applies
175 * them atomically to the superblock.
176 */
177 if (nagcount > oagcount)
178 xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
179 if (delta)
180 xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
181 if (id.nfree)
182 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
183
184 /*
185 * Sync sb counters now to reflect the updated values. This is
186 * particularly important for shrink because the write verifier
187 * will fail if sb_fdblocks is ever larger than sb_dblocks.
188 */
189 if (xfs_has_lazysbcount(mp))
190 xfs_log_sb(tp);
191
192 xfs_trans_set_sync(tp);
193 error = xfs_trans_commit(tp);
194 if (error)
195 return error;
196
197 /* New allocation groups fully initialized, so update mount struct */
198 if (nagimax)
199 mp->m_maxagi = nagimax;
200 xfs_set_low_space_thresholds(mp);
201 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
202
203 if (delta > 0) {
204 /*
205 * If we expanded the last AG, free the per-AG reservation
206 * so we can reinitialize it with the new size.
207 */
208 if (lastag_extended) {
209 struct xfs_perag *pag;
210
211 pag = xfs_perag_get(mp, id.agno);
212 xfs_ag_resv_free(pag);
213 xfs_perag_put(pag);
214 }
215 /*
216 * Reserve AG metadata blocks. ENOSPC here does not mean there
217 * was a growfs failure, just that there still isn't space for
218 * new user data after the grow has been run.
219 */
220 error = xfs_fs_reserve_ag_blocks(mp);
221 if (error == -ENOSPC)
222 error = 0;
223 }
224 return error;
225
226out_trans_cancel:
227 xfs_trans_cancel(tp);
228out_free_unused_perag:
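	/*
	 * Strip away the per-AG structures allocated for new AGs that never
	 * became part of the filesystem.
	 */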
229 if (nagcount > oagcount)
230 xfs_free_perag_range(mp, oagcount, nagcount);
231 return error;
232}
233
234static int
235xfs_growfs_log_private(
236 struct xfs_mount *mp, /* mount point for filesystem */
237 struct xfs_growfs_log *in) /* growfs log input struct */
238{
239 xfs_extlen_t nb;
240
241 nb = in->newblocks;
242 if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
243 return -EINVAL;
244 if (nb == mp->m_sb.sb_logblocks &&
245 in->isint == (mp->m_sb.sb_logstart != 0))
246 return -EINVAL;
247 /*
248	 * Moving the log is hard: we would need new interfaces to sync
249	 * the log first and hold off all activity while moving it.
250	 * We could have a shorter or longer log in the same space,
251	 * or transform an internal log to an external one, or vice versa.
252 */
253 return -ENOSYS;
254}
255
256static int
257xfs_growfs_imaxpct(
258 struct xfs_mount *mp,
259 __u32 imaxpct)
260{
261 struct xfs_trans *tp;
262 int dpct;
263 int error;
264
265 if (imaxpct > 100)
266 return -EINVAL;
267
268 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
269 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
270 if (error)
271 return error;
272
273 dpct = imaxpct - mp->m_sb.sb_imax_pct;
274 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
275 xfs_trans_set_sync(tp);
276 return xfs_trans_commit(tp);
277}
278
279/*
280 * Protected versions of the growfs functions acquire and release locks on the
281 * mount point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
282 * XFS_IOC_FSGROWFSRT
283 */
284int
285xfs_growfs_data(
286 struct xfs_mount *mp,
287 struct xfs_growfs_data *in)
288{
289 int error = 0;
290
291 if (!capable(CAP_SYS_ADMIN))
292 return -EPERM;
293 if (!mutex_trylock(&mp->m_growlock))
294 return -EWOULDBLOCK;
295
296 /* update imaxpct separately to the physical grow of the filesystem */
297 if (in->imaxpct != mp->m_sb.sb_imax_pct) {
298 error = xfs_growfs_imaxpct(mp, in->imaxpct);
299 if (error)
300 goto out_error;
301 }
302
303 if (in->newblocks != mp->m_sb.sb_dblocks) {
304 error = xfs_growfs_data_private(mp, in);
305 if (error)
306 goto out_error;
307 }
308
309	/* Post-growfs calculations needed to reflect the new state in operations */
310 if (mp->m_sb.sb_imax_pct) {
311 uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
312 do_div(icount, 100);
313 M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
314 } else
315 M_IGEO(mp)->maxicount = 0;
316
317 /* Update secondary superblocks now the physical grow has completed */
318 error = xfs_update_secondary_sbs(mp);
319
320out_error:
321 /*
322	 * Increment the generation unconditionally; the error could be from
323	 * updating the secondary superblocks, in which case the new size
324	 * is already live.
325 */
326 mp->m_generation++;
327 mutex_unlock(&mp->m_growlock);
328 return error;
329}
330
331int
332xfs_growfs_log(
333 xfs_mount_t *mp,
334 struct xfs_growfs_log *in)
335{
336 int error;
337
338 if (!capable(CAP_SYS_ADMIN))
339 return -EPERM;
340 if (!mutex_trylock(&mp->m_growlock))
341 return -EWOULDBLOCK;
342 error = xfs_growfs_log_private(mp, in);
343 mutex_unlock(&mp->m_growlock);
344 return error;
345}
346
347/*
348 * Reserve the requested number of blocks if available. Otherwise return
349 * as many as possible to satisfy the request. The actual number
350 * reserved is reflected in m_resblks and m_resblks_avail.
351 */
352int
353xfs_reserve_blocks(
354 struct xfs_mount *mp,
355 uint64_t request)
356{
357 int64_t lcounter, delta;
358 int64_t fdblks_delta = 0;
359 int64_t free;
360 int error = 0;
361
362 /*
363	 * With per-cpu counters, this becomes an interesting problem. We need
364	 * to work out if we are freeing or allocating blocks first, then we can
365 * do the modification as necessary.
366 *
367 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
368 * hold out any changes while we work out what to do. This means that
369 * the amount of free space can change while we do this, so we need to
370 * retry if we end up trying to reserve more space than is available.
371 */
372 spin_lock(&mp->m_sb_lock);
373
374 /*
375 * If our previous reservation was larger than the current value,
376 * then move any unused blocks back to the free pool. Modify the resblks
377 * counters directly since we shouldn't have any problems unreserving
378 * space.
379 */
380 if (mp->m_resblks > request) {
381 lcounter = mp->m_resblks_avail - request;
382 if (lcounter > 0) { /* release unused blocks */
383 fdblks_delta = lcounter;
384 mp->m_resblks_avail -= lcounter;
385 }
386 mp->m_resblks = request;
387 if (fdblks_delta) {
388 spin_unlock(&mp->m_sb_lock);
389 xfs_add_fdblocks(mp, fdblks_delta);
390 spin_lock(&mp->m_sb_lock);
391 }
392
393 goto out;
394 }
395
396 /*
397 * If the request is larger than the current reservation, reserve the
398 * blocks before we update the reserve counters. Sample m_fdblocks and
399 * perform a partial reservation if the request exceeds free space.
400 *
401 * The code below estimates how many blocks it can request from
402 * fdblocks to stash in the reserve pool. This is a classic TOCTOU
403 * race since fdblocks updates are not always coordinated via
404 * m_sb_lock. Set the reserve size even if there's not enough free
405 * space to fill it because mod_fdblocks will refill an undersized
406 * reserve when it can.
407 */
408 free = percpu_counter_sum(&mp->m_fdblocks) -
409 xfs_fdblocks_unavailable(mp);
410 delta = request - mp->m_resblks;
411 mp->m_resblks = request;
412 if (delta > 0 && free > 0) {
413 /*
414 * We'll either succeed in getting space from the free block
415 * count or we'll get an ENOSPC. Don't set the reserved flag
416 * here - we don't want to reserve the extra reserve blocks
417 * from the reserve.
418 *
419 * The desired reserve size can change after we drop the lock.
420 * Use mod_fdblocks to put the space into the reserve or into
421 * fdblocks as appropriate.
422 */
423 fdblks_delta = min(free, delta);
424 spin_unlock(&mp->m_sb_lock);
425 error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
426 if (!error)
427 xfs_add_fdblocks(mp, fdblks_delta);
428 spin_lock(&mp->m_sb_lock);
429 }
430out:
431 spin_unlock(&mp->m_sb_lock);
432 return error;
433}
434
435int
436xfs_fs_goingdown(
437 xfs_mount_t *mp,
438 uint32_t inflags)
439{
440 switch (inflags) {
441 case XFS_FSOP_GOING_FLAGS_DEFAULT: {
442 if (!bdev_freeze(mp->m_super->s_bdev)) {
443 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
444 bdev_thaw(mp->m_super->s_bdev);
445 }
446 break;
447 }
448 case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
449 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
450 break;
451 case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
452 xfs_force_shutdown(mp,
453 SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
454 break;
455 default:
456 return -EINVAL;
457 }
458
459 return 0;
460}
461
462/*
463 * Force a shutdown of the filesystem instantly while keeping the filesystem
464 * consistent. We don't do an unmount here; just shut down the shop and make sure
465 * that absolutely nothing persistent happens to this filesystem after this
466 * point.
467 *
468 * The shutdown state change is atomic, resulting in the first and only the
469 * first shutdown call processing the shutdown. This means we only shutdown the
470 * log once as it requires, and we don't spam the logs when multiple concurrent
471 * shutdowns race to set the shutdown flags.
472 */
473void
474xfs_do_force_shutdown(
475 struct xfs_mount *mp,
476 uint32_t flags,
477 char *fname,
478 int lnnum)
479{
480 int tag;
481 const char *why;
482
483
484 if (xfs_set_shutdown(mp)) {
485 xlog_shutdown_wait(mp->m_log);
486 return;
487 }
488 if (mp->m_sb_bp)
489 mp->m_sb_bp->b_flags |= XBF_DONE;
490
491 if (flags & SHUTDOWN_FORCE_UMOUNT)
492 xfs_alert(mp, "User initiated shutdown received.");
493
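	/*
	 * Pick a message and error tag for the shutdown reason; log IO
	 * errors take precedence over the other reasons.
	 */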
494 if (xlog_force_shutdown(mp->m_log, flags)) {
495 tag = XFS_PTAG_SHUTDOWN_LOGERROR;
496 why = "Log I/O Error";
497 } else if (flags & SHUTDOWN_CORRUPT_INCORE) {
498 tag = XFS_PTAG_SHUTDOWN_CORRUPT;
499 why = "Corruption of in-memory data";
500 } else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
501 tag = XFS_PTAG_SHUTDOWN_CORRUPT;
502 why = "Corruption of on-disk metadata";
503 } else if (flags & SHUTDOWN_DEVICE_REMOVED) {
504 tag = XFS_PTAG_SHUTDOWN_IOERROR;
505 why = "Block device removal";
506 } else {
507 tag = XFS_PTAG_SHUTDOWN_IOERROR;
508 why = "Metadata I/O Error";
509 }
510
511 trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);
512
513 xfs_alert_tag(mp, tag,
514"%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.",
515 why, flags, __return_address, fname, lnnum);
516 xfs_alert(mp,
517 "Please unmount the filesystem and rectify the problem(s)");
518 if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
519 xfs_stack_trace();
520}
521
522/*
523 * Reserve free space for per-AG metadata.
524 */
525int
526xfs_fs_reserve_ag_blocks(
527 struct xfs_mount *mp)
528{
529 struct xfs_perag *pag = NULL;
530 int error = 0;
531 int err2;
532
533 mp->m_finobt_nores = false;
534 while ((pag = xfs_perag_next(mp, pag))) {
535 err2 = xfs_ag_resv_init(pag, NULL);
536 if (err2 && !error)
537 error = err2;
538 }
539
540 if (error && error != -ENOSPC) {
541 xfs_warn(mp,
542 "Error %d reserving per-AG metadata reserve pool.", error);
543 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
544 }
545
546 return error;
547}
548
549/*
550 * Free space reserved for per-AG metadata.
551 */
552void
553xfs_fs_unreserve_ag_blocks(
554 struct xfs_mount *mp)
555{
556 struct xfs_perag *pag = NULL;
557
558 while ((pag = xfs_perag_next(mp, pag)))
559 xfs_ag_resv_free(pag);
560}