1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/ext4/resize.c
4 *
5 * Support for resizing an ext4 filesystem while it is mounted.
6 *
7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8 *
9 * This could probably be made into a module, because it is not often used.
10 */
11
12
13#define EXT4FS_DEBUG
14
15#include <linux/errno.h>
16#include <linux/slab.h>
17
18#include "ext4_jbd2.h"
19
20struct ext4_rcu_ptr {
21 struct rcu_head rcu;
22 void *ptr;
23};
24
25static void ext4_rcu_ptr_callback(struct rcu_head *head)
26{
27 struct ext4_rcu_ptr *ptr;
28
29 ptr = container_of(head, struct ext4_rcu_ptr, rcu);
30 kvfree(ptr->ptr);
31 kfree(ptr);
32}
33
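/*
 * Free @to_free (typically an old group-descriptor or flex-group array) only
 * after an RCU grace period has elapsed, so lockless readers that obtained
 * the old pointer are done with it.  If the small wrapper cannot be
 * allocated, wait for the grace period synchronously instead.
 */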
34void ext4_kvfree_array_rcu(void *to_free)
35{
36 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
37
38 if (ptr) {
39 ptr->ptr = to_free;
40 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
41 return;
42 }
43 synchronize_rcu();
44 kvfree(to_free);
45}
46
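/*
 * ext4_resize_begin() performs the permission and sanity checks shared by all
 * resize ioctls and takes the EXT4_FLAGS_RESIZING bit so that only one resize
 * can run at a time; ext4_resize_end() releases it again.
 */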
47int ext4_resize_begin(struct super_block *sb)
48{
49 struct ext4_sb_info *sbi = EXT4_SB(sb);
50 int ret = 0;
51
52 if (!capable(CAP_SYS_RESOURCE))
53 return -EPERM;
54
55 /*
56	 * If we are not using the primary superblock/GDT copy, don't resize,
57	 * because the user tools have no way of handling this.  It is probably
58	 * a bad time to do it anyway.
59 */
60 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
61 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
62 ext4_warning(sb, "won't resize using backup superblock at %llu",
63 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
64 return -EPERM;
65 }
66
67 /*
68	 * We are not allowed to do online resizing on a filesystem mounted
69	 * with errors, because doing so can easily destroy the filesystem.
70 */
71 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
72 ext4_warning(sb, "There are errors in the filesystem, "
73 "so online resizing is not allowed");
74 return -EPERM;
75 }
76
77 if (ext4_has_feature_sparse_super2(sb)) {
78 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
79 return -EOPNOTSUPP;
80 }
81
82 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
83 &EXT4_SB(sb)->s_ext4_flags))
84 ret = -EBUSY;
85
86 return ret;
87}
88
89void ext4_resize_end(struct super_block *sb)
90{
91 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
92 smp_mb__after_atomic();
93}
94
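/* Return the first group of the meta block group containing @group. */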
95static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
96 ext4_group_t group) {
97 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
98 EXT4_DESC_PER_BLOCK_BITS(sb);
99}
100
101static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
102 ext4_group_t group) {
103 group = ext4_meta_bg_first_group(sb, group);
104 return ext4_group_first_block_no(sb, group);
105}
106
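/*
 * Number of blocks at the start of @group occupied by metadata: the group
 * descriptor blocks, plus the superblock backup and reserved GDT blocks when
 * this group carries a superblock copy.
 */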
107static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
108 ext4_group_t group) {
109 ext4_grpblk_t overhead;
110 overhead = ext4_bg_num_gdb(sb, group);
111 if (ext4_bg_has_super(sb, group))
112 overhead += 1 +
113 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
114 return overhead;
115}
116
117#define outside(b, first, last) ((b) < (first) || (b) >= (last))
118#define inside(b, first, last) ((b) >= (first) && (b) < (last))
119
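/*
 * Sanity-check the layout supplied by userspace for a single new group: it
 * must directly follow the last existing group, its bitmaps and inode table
 * must lie inside the group, and none of them may overlap each other or the
 * superblock/GDT area.  The last block is read to verify that the device is
 * actually large enough.
 */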
120static int verify_group_input(struct super_block *sb,
121 struct ext4_new_group_data *input)
122{
123 struct ext4_sb_info *sbi = EXT4_SB(sb);
124 struct ext4_super_block *es = sbi->s_es;
125 ext4_fsblk_t start = ext4_blocks_count(es);
126 ext4_fsblk_t end = start + input->blocks_count;
127 ext4_group_t group = input->group;
128 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
129 unsigned overhead;
130 ext4_fsblk_t metaend;
131 struct buffer_head *bh = NULL;
132 ext4_grpblk_t free_blocks_count, offset;
133 int err = -EINVAL;
134
135 if (group != sbi->s_groups_count) {
136 ext4_warning(sb, "Cannot add at group %u (only %u groups)",
137 input->group, sbi->s_groups_count);
138 return -EINVAL;
139 }
140
141 overhead = ext4_group_overhead_blocks(sb, group);
142 metaend = start + overhead;
143 input->free_clusters_count = free_blocks_count =
144 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
145
146 if (test_opt(sb, DEBUG))
147 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
148 "(%d free, %u reserved)\n",
149 ext4_bg_has_super(sb, input->group) ? "normal" :
150 "no-super", input->group, input->blocks_count,
151 free_blocks_count, input->reserved_blocks);
152
153 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
154 if (offset != 0)
155 ext4_warning(sb, "Last group not full");
156 else if (input->reserved_blocks > input->blocks_count / 5)
157 ext4_warning(sb, "Reserved blocks too high (%u)",
158 input->reserved_blocks);
159 else if (free_blocks_count < 0)
160 ext4_warning(sb, "Bad blocks count %u",
161 input->blocks_count);
162 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
163 err = PTR_ERR(bh);
164 bh = NULL;
165 ext4_warning(sb, "Cannot read last block (%llu)",
166 end - 1);
167 } else if (outside(input->block_bitmap, start, end))
168 ext4_warning(sb, "Block bitmap not in group (block %llu)",
169 (unsigned long long)input->block_bitmap);
170 else if (outside(input->inode_bitmap, start, end))
171 ext4_warning(sb, "Inode bitmap not in group (block %llu)",
172 (unsigned long long)input->inode_bitmap);
173 else if (outside(input->inode_table, start, end) ||
174 outside(itend - 1, start, end))
175 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
176 (unsigned long long)input->inode_table, itend - 1);
177 else if (input->inode_bitmap == input->block_bitmap)
178 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
179 (unsigned long long)input->block_bitmap);
180 else if (inside(input->block_bitmap, input->inode_table, itend))
181 ext4_warning(sb, "Block bitmap (%llu) in inode table "
182 "(%llu-%llu)",
183 (unsigned long long)input->block_bitmap,
184 (unsigned long long)input->inode_table, itend - 1);
185 else if (inside(input->inode_bitmap, input->inode_table, itend))
186 ext4_warning(sb, "Inode bitmap (%llu) in inode table "
187 "(%llu-%llu)",
188 (unsigned long long)input->inode_bitmap,
189 (unsigned long long)input->inode_table, itend - 1);
190 else if (inside(input->block_bitmap, start, metaend))
191 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
192 (unsigned long long)input->block_bitmap,
193 start, metaend - 1);
194 else if (inside(input->inode_bitmap, start, metaend))
195 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
196 (unsigned long long)input->inode_bitmap,
197 start, metaend - 1);
198 else if (inside(input->inode_table, start, metaend) ||
199 inside(itend - 1, start, metaend))
200 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
201 "(%llu-%llu)",
202 (unsigned long long)input->inode_table,
203 itend - 1, start, metaend - 1);
204 else
205 err = 0;
206 brelse(bh);
207
208 return err;
209}
210
211/*
212 * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
213 * flex group at a time.
214 */
215struct ext4_new_flex_group_data {
216 struct ext4_new_group_data *groups; /* new_group_data for groups
217 in the flex group */
218 __u16 *bg_flags; /* block group flags of groups
219 in @groups */
220 ext4_group_t count; /* number of groups in @groups
221 */
222};
223
224/*
225 * alloc_flex_gd() allocates an ext4_new_flex_group_data large enough to
226 * hold @flexbg_size groups.
227 *
228 * Returns NULL on failure, otherwise the address of the allocated structure.
229 */
230static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
231{
232 struct ext4_new_flex_group_data *flex_gd;
233
234 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
235 if (flex_gd == NULL)
236 goto out3;
237
238 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
239 goto out2;
240 flex_gd->count = flexbg_size;
241
242 flex_gd->groups = kmalloc_array(flexbg_size,
243 sizeof(struct ext4_new_group_data),
244 GFP_NOFS);
245 if (flex_gd->groups == NULL)
246 goto out2;
247
248 flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
249 GFP_NOFS);
250 if (flex_gd->bg_flags == NULL)
251 goto out1;
252
253 return flex_gd;
254
255out1:
256 kfree(flex_gd->groups);
257out2:
258 kfree(flex_gd);
259out3:
260 return NULL;
261}
262
263static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
264{
265 kfree(flex_gd->bg_flags);
266 kfree(flex_gd->groups);
267 kfree(flex_gd);
268}
269
270/*
271 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
272 * and inode tables for a flex group.
273 *
274 * This function is used by 64bit-resize.  Note that this function allocates
275 * group tables from the 1st group of the groups contained by @flex_gd, which
276 * may be a partial flex group.
277 *
278 * @sb: super block of the fs to which the groups belong
279 *
280 * Returns 0 on a successful allocation of the metadata blocks in the
281 * block group.
282 */
283static int ext4_alloc_group_tables(struct super_block *sb,
284 struct ext4_new_flex_group_data *flex_gd,
285 int flexbg_size)
286{
287 struct ext4_new_group_data *group_data = flex_gd->groups;
288 ext4_fsblk_t start_blk;
289 ext4_fsblk_t last_blk;
290 ext4_group_t src_group;
291 ext4_group_t bb_index = 0;
292 ext4_group_t ib_index = 0;
293 ext4_group_t it_index = 0;
294 ext4_group_t group;
295 ext4_group_t last_group;
296 unsigned overhead;
297 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
298 int i;
299
300 BUG_ON(flex_gd->count == 0 || group_data == NULL);
301
302 src_group = group_data[0].group;
303 last_group = src_group + flex_gd->count - 1;
304
305 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
306 (last_group & ~(flexbg_size - 1))));
307next_group:
308 group = group_data[0].group;
309 if (src_group >= group_data[0].group + flex_gd->count)
310 return -ENOSPC;
311 start_blk = ext4_group_first_block_no(sb, src_group);
312 last_blk = start_blk + group_data[src_group - group].blocks_count;
313
314 overhead = ext4_group_overhead_blocks(sb, src_group);
315
316 start_blk += overhead;
317
318 /* We collect contiguous blocks as much as possible. */
319 src_group++;
320 for (; src_group <= last_group; src_group++) {
321 overhead = ext4_group_overhead_blocks(sb, src_group);
322 if (overhead == 0)
323 last_blk += group_data[src_group - group].blocks_count;
324 else
325 break;
326 }
327
328 /* Allocate block bitmaps */
329 for (; bb_index < flex_gd->count; bb_index++) {
330 if (start_blk >= last_blk)
331 goto next_group;
332 group_data[bb_index].block_bitmap = start_blk++;
333 group = ext4_get_group_number(sb, start_blk - 1);
334 group -= group_data[0].group;
335 group_data[group].mdata_blocks++;
336 flex_gd->bg_flags[group] &= uninit_mask;
337 }
338
339 /* Allocate inode bitmaps */
340 for (; ib_index < flex_gd->count; ib_index++) {
341 if (start_blk >= last_blk)
342 goto next_group;
343 group_data[ib_index].inode_bitmap = start_blk++;
344 group = ext4_get_group_number(sb, start_blk - 1);
345 group -= group_data[0].group;
346 group_data[group].mdata_blocks++;
347 flex_gd->bg_flags[group] &= uninit_mask;
348 }
349
350 /* Allocate inode tables */
351 for (; it_index < flex_gd->count; it_index++) {
352 unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
353 ext4_fsblk_t next_group_start;
354
355 if (start_blk + itb > last_blk)
356 goto next_group;
357 group_data[it_index].inode_table = start_blk;
358 group = ext4_get_group_number(sb, start_blk);
359 next_group_start = ext4_group_first_block_no(sb, group + 1);
360 group -= group_data[0].group;
361
362 if (start_blk + itb > next_group_start) {
363 flex_gd->bg_flags[group + 1] &= uninit_mask;
364 overhead = start_blk + itb - next_group_start;
365 group_data[group + 1].mdata_blocks += overhead;
366 itb -= overhead;
367 }
368
369 group_data[group].mdata_blocks += itb;
370 flex_gd->bg_flags[group] &= uninit_mask;
371 start_blk += EXT4_SB(sb)->s_itb_per_group;
372 }
373
374 /* Update free clusters count to exclude metadata blocks */
375 for (i = 0; i < flex_gd->count; i++) {
376 group_data[i].free_clusters_count -=
377 EXT4_NUM_B2C(EXT4_SB(sb),
378 group_data[i].mdata_blocks);
379 }
380
381 if (test_opt(sb, DEBUG)) {
382 int i;
383 group = group_data[0].group;
384
385 printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
386 "%d groups, flexbg size is %d:\n", flex_gd->count,
387 flexbg_size);
388
389 for (i = 0; i < flex_gd->count; i++) {
390 ext4_debug(
391 "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
392 ext4_bg_has_super(sb, group + i) ? "normal" :
393 "no-super", group + i,
394 group_data[i].blocks_count,
395 group_data[i].free_clusters_count,
396 group_data[i].mdata_blocks);
397 }
398 }
399 return 0;
400}
401
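/*
 * Return a zeroed, journaled buffer for block @blk, ready to be used as a
 * fresh bitmap; returns an ERR_PTR() on failure.
 */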
402static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
403 ext4_fsblk_t blk)
404{
405 struct buffer_head *bh;
406 int err;
407
408 bh = sb_getblk(sb, blk);
409 if (unlikely(!bh))
410 return ERR_PTR(-ENOMEM);
411 BUFFER_TRACE(bh, "get_write_access");
412 if ((err = ext4_journal_get_write_access(handle, bh))) {
413 brelse(bh);
414 bh = ERR_PTR(err);
415 } else {
416 memset(bh->b_data, 0, sb->s_blocksize);
417 set_buffer_uptodate(bh);
418 }
419
420 return bh;
421}
422
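/*
 * Make sure @handle has at least @credits credits left, extending or
 * restarting the transaction (in batches of up to EXT4_MAX_TRANS_DATA) when
 * it runs low.  Returns a negative errno on failure.
 */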
423static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
424{
425 return ext4_journal_ensure_credits_fn(handle, credits,
426 EXT4_MAX_TRANS_DATA, 0, 0);
427}
428
429/*
430 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
431 *
432 * Helper function for setup_new_flex_group_blocks() which marks the group-table blocks in the new groups' block bitmaps.
433 *
434 * @sb: super block
435 * @handle: journal handle
436 * @flex_gd: flex group data
437 */
438static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
439 struct ext4_new_flex_group_data *flex_gd,
440 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
441{
442 struct ext4_sb_info *sbi = EXT4_SB(sb);
443 ext4_group_t count = last_cluster - first_cluster + 1;
444 ext4_group_t count2;
445
446 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
447 last_cluster);
448 for (count2 = count; count > 0;
449 count -= count2, first_cluster += count2) {
450 ext4_fsblk_t start;
451 struct buffer_head *bh;
452 ext4_group_t group;
453 int err;
454
455 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
456 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
457 group -= flex_gd->groups[0].group;
458
459 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
460 if (count2 > count)
461 count2 = count;
462
463 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
464 BUG_ON(flex_gd->count > 1);
465 continue;
466 }
467
468 err = ext4_resize_ensure_credits_batch(handle, 1);
469 if (err < 0)
470 return err;
471
472 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
473 if (unlikely(!bh))
474 return -ENOMEM;
475
476 BUFFER_TRACE(bh, "get_write_access");
477 err = ext4_journal_get_write_access(handle, bh);
478 if (err) {
479 brelse(bh);
480 return err;
481 }
482 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
483 first_cluster, first_cluster - start, count2);
484 ext4_set_bits(bh->b_data, first_cluster - start, count2);
485
486 err = ext4_handle_dirty_metadata(handle, NULL, bh);
487 brelse(bh);
488 if (unlikely(err))
489 return err;
490 }
491
492 return 0;
493}
494
495/*
496 * Set up the block and inode bitmaps, and the inode table for the new groups.
497 * This doesn't need to be part of the main transaction, since we are only
498 * changing blocks outside the actual filesystem. We still do journaling to
499 * ensure the recovery is correct in case of a failure just after resize.
500 * If any part of this fails, we simply abort the resize.
501 *
502 * setup_new_flex_group_blocks handles a flex group as follows:
503 * 1. copy super block and GDT, and initialize group tables if necessary.
504 *    In this step, we only set bits in block bitmaps for blocks taken by
505 * super block and GDT.
506 * 2. allocate group tables in block bitmaps, that is, set bits in block
507 * bitmap for blocks taken by group tables.
508 */
509static int setup_new_flex_group_blocks(struct super_block *sb,
510 struct ext4_new_flex_group_data *flex_gd)
511{
512 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
513 ext4_fsblk_t start;
514 ext4_fsblk_t block;
515 struct ext4_sb_info *sbi = EXT4_SB(sb);
516 struct ext4_super_block *es = sbi->s_es;
517 struct ext4_new_group_data *group_data = flex_gd->groups;
518 __u16 *bg_flags = flex_gd->bg_flags;
519 handle_t *handle;
520 ext4_group_t group, count;
521 struct buffer_head *bh = NULL;
522 int reserved_gdb, i, j, err = 0, err2;
523 int meta_bg;
524
525 BUG_ON(!flex_gd->count || !group_data ||
526 group_data[0].group != sbi->s_groups_count);
527
528 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
529 meta_bg = ext4_has_feature_meta_bg(sb);
530
531 /* This transaction may be extended/restarted along the way */
532 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
533 if (IS_ERR(handle))
534 return PTR_ERR(handle);
535
536 group = group_data[0].group;
537 for (i = 0; i < flex_gd->count; i++, group++) {
538 unsigned long gdblocks;
539 ext4_grpblk_t overhead;
540
541 gdblocks = ext4_bg_num_gdb(sb, group);
542 start = ext4_group_first_block_no(sb, group);
543
544 if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
545 goto handle_itb;
546
547 if (meta_bg == 1) {
548 ext4_group_t first_group;
549 first_group = ext4_meta_bg_first_group(sb, group);
550 if (first_group != group + 1 &&
551 first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
552 goto handle_itb;
553 }
554
555 block = start + ext4_bg_has_super(sb, group);
556 /* Copy all of the GDT blocks into the backup in this group */
557 for (j = 0; j < gdblocks; j++, block++) {
558 struct buffer_head *gdb;
559
560 ext4_debug("update backup group %#04llx\n", block);
561 err = ext4_resize_ensure_credits_batch(handle, 1);
562 if (err < 0)
563 goto out;
564
565 gdb = sb_getblk(sb, block);
566 if (unlikely(!gdb)) {
567 err = -ENOMEM;
568 goto out;
569 }
570
571 BUFFER_TRACE(gdb, "get_write_access");
572 err = ext4_journal_get_write_access(handle, gdb);
573 if (err) {
574 brelse(gdb);
575 goto out;
576 }
577 memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
578 s_group_desc, j)->b_data, gdb->b_size);
579 set_buffer_uptodate(gdb);
580
581 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
582 if (unlikely(err)) {
583 brelse(gdb);
584 goto out;
585 }
586 brelse(gdb);
587 }
588
589 /* Zero out all of the reserved backup group descriptor
590 * table blocks
591 */
592 if (ext4_bg_has_super(sb, group)) {
593 err = sb_issue_zeroout(sb, gdblocks + start + 1,
594 reserved_gdb, GFP_NOFS);
595 if (err)
596 goto out;
597 }
598
599handle_itb:
600		/* Initialize group tables of the group @group */
601 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
602 goto handle_bb;
603
604 /* Zero out all of the inode table blocks */
605 block = group_data[i].inode_table;
606 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
607 block, sbi->s_itb_per_group);
608 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
609 GFP_NOFS);
610 if (err)
611 goto out;
612
613handle_bb:
614 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
615 goto handle_ib;
616
617 /* Initialize block bitmap of the @group */
618 block = group_data[i].block_bitmap;
619 err = ext4_resize_ensure_credits_batch(handle, 1);
620 if (err < 0)
621 goto out;
622
623 bh = bclean(handle, sb, block);
624 if (IS_ERR(bh)) {
625 err = PTR_ERR(bh);
626 goto out;
627 }
628 overhead = ext4_group_overhead_blocks(sb, group);
629 if (overhead != 0) {
630 ext4_debug("mark backup superblock %#04llx (+0)\n",
631 start);
632 ext4_set_bits(bh->b_data, 0,
633 EXT4_NUM_B2C(sbi, overhead));
634 }
635 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
636 sb->s_blocksize * 8, bh->b_data);
637 err = ext4_handle_dirty_metadata(handle, NULL, bh);
638 brelse(bh);
639 if (err)
640 goto out;
641
642handle_ib:
643 if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
644 continue;
645
646 /* Initialize inode bitmap of the @group */
647 block = group_data[i].inode_bitmap;
648 err = ext4_resize_ensure_credits_batch(handle, 1);
649 if (err < 0)
650 goto out;
651 /* Mark unused entries in inode bitmap used */
652 bh = bclean(handle, sb, block);
653 if (IS_ERR(bh)) {
654 err = PTR_ERR(bh);
655 goto out;
656 }
657
658 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
659 sb->s_blocksize * 8, bh->b_data);
660 err = ext4_handle_dirty_metadata(handle, NULL, bh);
661 brelse(bh);
662 if (err)
663 goto out;
664 }
665
666 /* Mark group tables in block bitmap */
667 for (j = 0; j < GROUP_TABLE_COUNT; j++) {
668 count = group_table_count[j];
669 start = (&group_data[0].block_bitmap)[j];
670 block = start;
671 for (i = 1; i < flex_gd->count; i++) {
672 block += group_table_count[j];
673 if (block == (&group_data[i].block_bitmap)[j]) {
674 count += group_table_count[j];
675 continue;
676 }
677 err = set_flexbg_block_bitmap(sb, handle,
678 flex_gd,
679 EXT4_B2C(sbi, start),
680 EXT4_B2C(sbi,
681 start + count
682 - 1));
683 if (err)
684 goto out;
685 count = group_table_count[j];
686 start = (&group_data[i].block_bitmap)[j];
687 block = start;
688 }
689
690 if (count) {
691 err = set_flexbg_block_bitmap(sb, handle,
692 flex_gd,
693 EXT4_B2C(sbi, start),
694 EXT4_B2C(sbi,
695 start + count
696 - 1));
697 if (err)
698 goto out;
699 }
700 }
701
702out:
703 err2 = ext4_journal_stop(handle);
704 if (err2 && !err)
705 err = err2;
706
707 return err;
708}
709
710/*
711 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
712 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
713 * calling this for the first time. In a sparse filesystem it will be the
714 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
715 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
716 */
717static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
718 unsigned *five, unsigned *seven)
719{
720 unsigned *min = three;
721 int mult = 3;
722 unsigned ret;
723
724 if (!ext4_has_feature_sparse_super(sb)) {
725 ret = *min;
726 *min += 1;
727 return ret;
728 }
729
730 if (*five < *min) {
731 min = five;
732 mult = 5;
733 }
734 if (*seven < *min) {
735 min = seven;
736 mult = 7;
737 }
738
739 ret = *min;
740 *min *= mult;
741
742 return ret;
743}
744
745/*
746 * Check that all of the backup GDT blocks are held in the primary GDT block.
747 * It is assumed that they are stored in group order. Returns the number of
748 * groups in the current filesystem that have BACKUPS, or a negative error code.
749 */
750static int verify_reserved_gdb(struct super_block *sb,
751 ext4_group_t end,
752 struct buffer_head *primary)
753{
754 const ext4_fsblk_t blk = primary->b_blocknr;
755 unsigned three = 1;
756 unsigned five = 5;
757 unsigned seven = 7;
758 unsigned grp;
759 __le32 *p = (__le32 *)primary->b_data;
760 int gdbackups = 0;
761
762 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
763 if (le32_to_cpu(*p++) !=
764 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
765 ext4_warning(sb, "reserved GDT %llu"
766 " missing grp %d (%llu)",
767 blk, grp,
768 grp *
769 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
770 blk);
771 return -EINVAL;
772 }
773 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
774 return -EFBIG;
775 }
776
777 return gdbackups;
778}
779
780/*
781 * Called when we need to bring a reserved group descriptor table block into
782 * use from the resize inode. The primary copy of the new GDT block currently
783 * is an indirect block (under the double indirect block in the resize inode).
784 * The new backup GDT blocks will be stored as leaf blocks in this indirect
785 * block, in group order. Even though we know all the block numbers we need,
786 * we check to ensure that the resize inode has actually reserved these blocks.
787 *
788 * Don't need to update the block bitmaps because the blocks are still in use.
789 *
790 * We get all of the error cases out of the way, so that we are sure to not
791 * fail once we start modifying the data on disk, because JBD has no rollback.
792 */
793static int add_new_gdb(handle_t *handle, struct inode *inode,
794 ext4_group_t group)
795{
796 struct super_block *sb = inode->i_sb;
797 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
798 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
799 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
800 struct buffer_head **o_group_desc, **n_group_desc = NULL;
801 struct buffer_head *dind = NULL;
802 struct buffer_head *gdb_bh = NULL;
803 int gdbackups;
804 struct ext4_iloc iloc = { .bh = NULL };
805 __le32 *data;
806 int err;
807
808 if (test_opt(sb, DEBUG))
809 printk(KERN_DEBUG
810 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
811 gdb_num);
812
813 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
814 if (IS_ERR(gdb_bh))
815 return PTR_ERR(gdb_bh);
816
817 gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
818 if (gdbackups < 0) {
819 err = gdbackups;
820 goto errout;
821 }
822
823 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
824 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
825 if (IS_ERR(dind)) {
826 err = PTR_ERR(dind);
827 dind = NULL;
828 goto errout;
829 }
830
831 data = (__le32 *)dind->b_data;
832 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
833 ext4_warning(sb, "new group %u GDT block %llu not reserved",
834 group, gdblock);
835 err = -EINVAL;
836 goto errout;
837 }
838
839 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
840 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
841 if (unlikely(err))
842 goto errout;
843
844 BUFFER_TRACE(gdb_bh, "get_write_access");
845 err = ext4_journal_get_write_access(handle, gdb_bh);
846 if (unlikely(err))
847 goto errout;
848
849 BUFFER_TRACE(dind, "get_write_access");
850 err = ext4_journal_get_write_access(handle, dind);
851 if (unlikely(err)) {
852 ext4_std_error(sb, err);
853 goto errout;
854 }
855
856 /* ext4_reserve_inode_write() gets a reference on the iloc */
857 err = ext4_reserve_inode_write(handle, inode, &iloc);
858 if (unlikely(err))
859 goto errout;
860
861 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
862 GFP_KERNEL);
863 if (!n_group_desc) {
864 err = -ENOMEM;
865 ext4_warning(sb, "not enough memory for %lu groups",
866 gdb_num + 1);
867 goto errout;
868 }
869
870 /*
871 * Finally, we have all of the possible failures behind us...
872 *
873 * Remove new GDT block from inode double-indirect block and clear out
874 * the new GDT block for use (which also "frees" the backup GDT blocks
875 * from the reserved inode). We don't need to change the bitmaps for
876 * these blocks, because they are marked as in-use from being in the
877 * reserved inode, and will become GDT blocks (primary and backup).
878 */
879 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
880 err = ext4_handle_dirty_metadata(handle, NULL, dind);
881 if (unlikely(err)) {
882 ext4_std_error(sb, err);
883 goto errout;
884 }
885 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
886 (9 - EXT4_SB(sb)->s_cluster_bits);
887 ext4_mark_iloc_dirty(handle, inode, &iloc);
888 memset(gdb_bh->b_data, 0, sb->s_blocksize);
889 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
890 if (unlikely(err)) {
891 ext4_std_error(sb, err);
892 iloc.bh = NULL;
893 goto errout;
894 }
895 brelse(dind);
896
897 rcu_read_lock();
898 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
899 memcpy(n_group_desc, o_group_desc,
900 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
901 rcu_read_unlock();
902 n_group_desc[gdb_num] = gdb_bh;
903 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
904 EXT4_SB(sb)->s_gdb_count++;
905 ext4_kvfree_array_rcu(o_group_desc);
906
907 lock_buffer(EXT4_SB(sb)->s_sbh);
908 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
909 ext4_superblock_csum_set(sb);
910 unlock_buffer(EXT4_SB(sb)->s_sbh);
911 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
912 if (err)
913 ext4_std_error(sb, err);
914 return err;
915errout:
916 kvfree(n_group_desc);
917 brelse(iloc.bh);
918 brelse(dind);
919 brelse(gdb_bh);
920
921 ext4_debug("leaving with error %d\n", err);
922 return err;
923}
924
925/*
926 * add_new_gdb_meta_bg is the meta_bg counterpart of add_new_gdb(); the new GDT block lies inside the new group itself, so no resize inode is needed.
927 */
928static int add_new_gdb_meta_bg(struct super_block *sb,
929 handle_t *handle, ext4_group_t group) {
930 ext4_fsblk_t gdblock;
931 struct buffer_head *gdb_bh;
932 struct buffer_head **o_group_desc, **n_group_desc;
933 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
934 int err;
935
936 gdblock = ext4_meta_bg_first_block_no(sb, group) +
937 ext4_bg_has_super(sb, group);
938 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
939 if (IS_ERR(gdb_bh))
940 return PTR_ERR(gdb_bh);
941 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
942 GFP_KERNEL);
943 if (!n_group_desc) {
944 brelse(gdb_bh);
945 err = -ENOMEM;
946 ext4_warning(sb, "not enough memory for %lu groups",
947 gdb_num + 1);
948 return err;
949 }
950
951 rcu_read_lock();
952 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
953 memcpy(n_group_desc, o_group_desc,
954 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
955 rcu_read_unlock();
956 n_group_desc[gdb_num] = gdb_bh;
957
958 BUFFER_TRACE(gdb_bh, "get_write_access");
959 err = ext4_journal_get_write_access(handle, gdb_bh);
960 if (err) {
961 kvfree(n_group_desc);
962 brelse(gdb_bh);
963 return err;
964 }
965
966 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
967 EXT4_SB(sb)->s_gdb_count++;
968 ext4_kvfree_array_rcu(o_group_desc);
969 return err;
970}
971
972/*
973 * Called when we are adding a new group which has a backup copy of each of
974 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
975 * We need to add these reserved backup GDT blocks to the resize inode, so
976 * that they are kept for future resizing and not allocated to files.
977 *
978 * Each reserved backup GDT block will go into a different indirect block.
979 * The indirect blocks are actually the primary reserved GDT blocks,
980 * so we know in advance what their block numbers are. We only get the
981 * double-indirect block to verify it is pointing to the primary reserved
982 * GDT blocks so we don't overwrite a data block by accident. The reserved
983 * backup GDT blocks are stored in their reserved primary GDT block.
984 */
985static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
986 ext4_group_t group)
987{
988 struct super_block *sb = inode->i_sb;
989	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
990 int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
991 struct buffer_head **primary;
992 struct buffer_head *dind;
993 struct ext4_iloc iloc;
994 ext4_fsblk_t blk;
995 __le32 *data, *end;
996 int gdbackups = 0;
997 int res, i;
998 int err;
999
1000 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1001 if (!primary)
1002 return -ENOMEM;
1003
1004 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1005 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1006 if (IS_ERR(dind)) {
1007 err = PTR_ERR(dind);
1008 dind = NULL;
1009 goto exit_free;
1010 }
1011
1012 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1013 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1014 EXT4_ADDR_PER_BLOCK(sb));
1015 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1016
1017 /* Get each reserved primary GDT block and verify it holds backups */
1018 for (res = 0; res < reserved_gdb; res++, blk++) {
1019 if (le32_to_cpu(*data) != blk) {
1020 ext4_warning(sb, "reserved block %llu"
1021 " not at offset %ld",
1022 blk,
1023 (long)(data - (__le32 *)dind->b_data));
1024 err = -EINVAL;
1025 goto exit_bh;
1026 }
1027 primary[res] = ext4_sb_bread(sb, blk, 0);
1028 if (IS_ERR(primary[res])) {
1029 err = PTR_ERR(primary[res]);
1030 primary[res] = NULL;
1031 goto exit_bh;
1032 }
1033 gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1034 if (gdbackups < 0) {
1035 brelse(primary[res]);
1036 err = gdbackups;
1037 goto exit_bh;
1038 }
1039 if (++data >= end)
1040 data = (__le32 *)dind->b_data;
1041 }
1042
1043 for (i = 0; i < reserved_gdb; i++) {
1044 BUFFER_TRACE(primary[i], "get_write_access");
1045 if ((err = ext4_journal_get_write_access(handle, primary[i])))
1046 goto exit_bh;
1047 }
1048
1049 if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1050 goto exit_bh;
1051
1052 /*
1053 * Finally we can add each of the reserved backup GDT blocks from
1054 * the new group to its reserved primary GDT block.
1055 */
1056 blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1057 for (i = 0; i < reserved_gdb; i++) {
1058 int err2;
1059 data = (__le32 *)primary[i]->b_data;
1060 /* printk("reserving backup %lu[%u] = %lu\n",
1061 primary[i]->b_blocknr, gdbackups,
1062 blk + primary[i]->b_blocknr); */
1063 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1064 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1065 if (!err)
1066 err = err2;
1067 }
1068
1069 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1070 ext4_mark_iloc_dirty(handle, inode, &iloc);
1071
1072exit_bh:
1073 while (--res >= 0)
1074 brelse(primary[res]);
1075 brelse(dind);
1076
1077exit_free:
1078 kfree(primary);
1079
1080 return err;
1081}
1082
1083/*
1084 * Update the backup copies of the ext4 metadata. These don't need to be part
1085 * of the main resize transaction, because e2fsck will re-write them if there
1086 * is a problem (basically only OOM will cause a problem). However, we
1087 * _should_ update the backups if possible, in case the primary gets trashed
1088 * for some reason and we need to run e2fsck from a backup superblock. The
1089 * important part is that the new block and inode counts are in the backup
1090 * superblocks, and the location of the new group metadata in the GDT backups.
1091 *
1092 * We do not need to take the s_resize_lock for this, because these
1093 * blocks are not otherwise touched by the filesystem code when it is
1094 * mounted. We don't need to worry about last changing from
1095 * sbi->s_groups_count, because the worst that can happen is that we
1096 * do not copy the full number of backups at this time. The resize
1097 * which changed s_groups_count will backup again.
1098 */
1099static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1100 int size, int meta_bg)
1101{
1102 struct ext4_sb_info *sbi = EXT4_SB(sb);
1103 ext4_group_t last;
1104 const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1105 unsigned three = 1;
1106 unsigned five = 5;
1107 unsigned seven = 7;
1108 ext4_group_t group = 0;
1109 int rest = sb->s_blocksize - size;
1110 handle_t *handle;
1111 int err = 0, err2;
1112
1113 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1114 if (IS_ERR(handle)) {
1115 group = 1;
1116 err = PTR_ERR(handle);
1117 goto exit_err;
1118 }
1119
1120 if (meta_bg == 0) {
1121 group = ext4_list_backups(sb, &three, &five, &seven);
1122 last = sbi->s_groups_count;
1123 } else {
1124 group = ext4_get_group_number(sb, blk_off) + 1;
1125 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1126 }
1127
1128 while (group < sbi->s_groups_count) {
1129 struct buffer_head *bh;
1130 ext4_fsblk_t backup_block;
1131
1132 /* Out of journal space, and can't get more - abort - so sad */
1133 err = ext4_resize_ensure_credits_batch(handle, 1);
1134 if (err < 0)
1135 break;
1136
1137 if (meta_bg == 0)
1138 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1139 else
1140 backup_block = (ext4_group_first_block_no(sb, group) +
1141 ext4_bg_has_super(sb, group));
1142
1143 bh = sb_getblk(sb, backup_block);
1144 if (unlikely(!bh)) {
1145 err = -ENOMEM;
1146 break;
1147 }
1148 ext4_debug("update metadata backup %llu(+%llu)\n",
1149 backup_block, backup_block -
1150 ext4_group_first_block_no(sb, group));
1151 BUFFER_TRACE(bh, "get_write_access");
1152 if ((err = ext4_journal_get_write_access(handle, bh))) {
1153 brelse(bh);
1154 break;
1155 }
1156 lock_buffer(bh);
1157 memcpy(bh->b_data, data, size);
1158 if (rest)
1159 memset(bh->b_data + size, 0, rest);
1160 set_buffer_uptodate(bh);
1161 unlock_buffer(bh);
1162 err = ext4_handle_dirty_metadata(handle, NULL, bh);
1163 if (unlikely(err))
1164 ext4_std_error(sb, err);
1165 brelse(bh);
1166
1167 if (meta_bg == 0)
1168 group = ext4_list_backups(sb, &three, &five, &seven);
1169 else if (group == last)
1170 break;
1171 else
1172 group = last;
1173 }
1174 if ((err2 = ext4_journal_stop(handle)) && !err)
1175 err = err2;
1176
1177 /*
1178 * Ugh! Need to have e2fsck write the backup copies. It is too
1179 * late to revert the resize, we shouldn't fail just because of
1180 * the backup copies (they are only needed in case of corruption).
1181 *
1182 * However, if we got here we have a journal problem too, so we
1183 * can't really start a transaction to mark the superblock.
1184	 * Chicken out and just set the flag in the hope that it will be written
1185	 * to disk, and if not - we will simply wait until the next fsck.
1186 */
1187exit_err:
1188 if (err) {
1189 ext4_warning(sb, "can't update backup for group %u (err %d), "
1190 "forcing fsck on next reboot", group, err);
1191 sbi->s_mount_state &= ~EXT4_VALID_FS;
1192 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1193 mark_buffer_dirty(sbi->s_sbh);
1194 }
1195}
1196
1197/*
1198 * ext4_add_new_descs() adds @count group descriptors for the groups
1199 * starting at @group.
1200 *
1201 * @handle: journal handle
1202 * @sb: super block
1203 * @group: the group no. of the first group desc to be added
1204 * @resize_inode: the resize inode
1205 * @count: number of group descriptors to be added
1206 */
1207static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1208 ext4_group_t group, struct inode *resize_inode,
1209 ext4_group_t count)
1210{
1211 struct ext4_sb_info *sbi = EXT4_SB(sb);
1212 struct ext4_super_block *es = sbi->s_es;
1213 struct buffer_head *gdb_bh;
1214 int i, gdb_off, gdb_num, err = 0;
1215 int meta_bg;
1216
1217 meta_bg = ext4_has_feature_meta_bg(sb);
1218 for (i = 0; i < count; i++, group++) {
1219 int reserved_gdb = ext4_bg_has_super(sb, group) ?
1220 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1221
1222 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1223 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1224
1225 /*
1226 * We will only either add reserved group blocks to a backup group
1227 * or remove reserved blocks for the first group in a new group block.
1228		 * Doing both would mean more complex code, and sane people don't
1229 * use non-sparse filesystems anymore. This is already checked above.
1230 */
1231 if (gdb_off) {
1232 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1233 gdb_num);
1234 BUFFER_TRACE(gdb_bh, "get_write_access");
1235 err = ext4_journal_get_write_access(handle, gdb_bh);
1236
1237 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1238 err = reserve_backup_gdb(handle, resize_inode, group);
1239 } else if (meta_bg != 0) {
1240 err = add_new_gdb_meta_bg(sb, handle, group);
1241 } else {
1242 err = add_new_gdb(handle, resize_inode, group);
1243 }
1244 if (err)
1245 break;
1246 }
1247 return err;
1248}
1249
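/* Read the bitmap at @block, returning NULL on allocation or I/O failure. */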
1250static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1251{
1252 struct buffer_head *bh = sb_getblk(sb, block);
1253 if (unlikely(!bh))
1254 return NULL;
1255 if (!bh_uptodate_or_lock(bh)) {
1256 if (ext4_read_bh(bh, 0, NULL) < 0) {
1257 brelse(bh);
1258 return NULL;
1259 }
1260 }
1261
1262 return bh;
1263}
1264
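/*
 * Compute and store the inode and block bitmap checksums for the new group
 * descriptor @gdp; a no-op when metadata checksums are not enabled.
 */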
1265static int ext4_set_bitmap_checksums(struct super_block *sb,
1266 ext4_group_t group,
1267 struct ext4_group_desc *gdp,
1268 struct ext4_new_group_data *group_data)
1269{
1270 struct buffer_head *bh;
1271
1272 if (!ext4_has_metadata_csum(sb))
1273 return 0;
1274
1275 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1276 if (!bh)
1277 return -EIO;
1278 ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1279 EXT4_INODES_PER_GROUP(sb) / 8);
1280 brelse(bh);
1281
1282 bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1283 if (!bh)
1284 return -EIO;
1285 ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1286 brelse(bh);
1287
1288 return 0;
1289}
1290
1291/*
1292 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1293 */
1294static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1295 struct ext4_new_flex_group_data *flex_gd)
1296{
1297 struct ext4_new_group_data *group_data = flex_gd->groups;
1298 struct ext4_group_desc *gdp;
1299 struct ext4_sb_info *sbi = EXT4_SB(sb);
1300 struct buffer_head *gdb_bh;
1301 ext4_group_t group;
1302 __u16 *bg_flags = flex_gd->bg_flags;
1303 int i, gdb_off, gdb_num, err = 0;
1304
1305
1306 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1307 group = group_data->group;
1308
1309 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1310 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1311
1312 /*
1313		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1314 */
1315 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1316 /* Update group descriptor block for new group */
1317 gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1318 gdb_off * EXT4_DESC_SIZE(sb));
1319
1320 memset(gdp, 0, EXT4_DESC_SIZE(sb));
1321 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1322 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1323 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1324 if (err) {
1325 ext4_std_error(sb, err);
1326 break;
1327 }
1328
1329 ext4_inode_table_set(sb, gdp, group_data->inode_table);
1330 ext4_free_group_clusters_set(sb, gdp,
1331 group_data->free_clusters_count);
1332 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1333 if (ext4_has_group_desc_csum(sb))
1334 ext4_itable_unused_set(sb, gdp,
1335 EXT4_INODES_PER_GROUP(sb));
1336 gdp->bg_flags = cpu_to_le16(*bg_flags);
1337 ext4_group_desc_csum_set(sb, group, gdp);
1338
1339 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1340 if (unlikely(err)) {
1341 ext4_std_error(sb, err);
1342 break;
1343 }
1344
1345 /*
1346 * We can allocate memory for mb_alloc based on the new group
1347 * descriptor
1348 */
1349 err = ext4_mb_add_groupinfo(sb, group, gdp);
1350 if (err)
1351 break;
1352 }
1353 return err;
1354}
1355
1356/*
1357 * ext4_update_super() updates the super block so that the newly added
1358 * groups can be seen by the filesystem.
1359 *
1360 * @sb: super block
1361 * @flex_gd: newly added groups
1362 */
1363static void ext4_update_super(struct super_block *sb,
1364 struct ext4_new_flex_group_data *flex_gd)
1365{
1366 ext4_fsblk_t blocks_count = 0;
1367 ext4_fsblk_t free_blocks = 0;
1368 ext4_fsblk_t reserved_blocks = 0;
1369 struct ext4_new_group_data *group_data = flex_gd->groups;
1370 struct ext4_sb_info *sbi = EXT4_SB(sb);
1371 struct ext4_super_block *es = sbi->s_es;
1372 int i;
1373
1374 BUG_ON(flex_gd->count == 0 || group_data == NULL);
1375 /*
1376 * Make the new blocks and inodes valid next. We do this before
1377 * increasing the group count so that once the group is enabled,
1378 * all of its blocks and inodes are already valid.
1379 *
1380 * We always allocate group-by-group, then block-by-block or
1381 * inode-by-inode within a group, so enabling these
1382 * blocks/inodes before the group is live won't actually let us
1383 * allocate the new space yet.
1384 */
1385 for (i = 0; i < flex_gd->count; i++) {
1386 blocks_count += group_data[i].blocks_count;
1387 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1388 }
1389
1390 reserved_blocks = ext4_r_blocks_count(es) * 100;
1391 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1392 reserved_blocks *= blocks_count;
1393 do_div(reserved_blocks, 100);
1394
1395 lock_buffer(sbi->s_sbh);
1396 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1397 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1398 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1399 flex_gd->count);
1400 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1401 flex_gd->count);
1402
1403 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1404 /*
1405 * We need to protect s_groups_count against other CPUs seeing
1406 * inconsistent state in the superblock.
1407 *
1408 * The precise rules we use are:
1409 *
1410 * * Writers must perform a smp_wmb() after updating all
1411 * dependent data and before modifying the groups count
1412 *
1413 * * Readers must perform an smp_rmb() after reading the groups
1414 * count and before reading any dependent data.
1415 *
1416 * NB. These rules can be relaxed when checking the group count
1417 * while freeing data, as we can only allocate from a block
1418 * group after serialising against the group count, and we can
1419 * only then free after serialising in turn against that
1420 * allocation.
1421 */
1422 smp_wmb();
1423
1424 /* Update the global fs size fields */
1425 sbi->s_groups_count += flex_gd->count;
1426 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1427 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1428
1429 /* Update the reserved block counts only once the new group is
1430 * active. */
1431 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1432 reserved_blocks);
1433 ext4_superblock_csum_set(sb);
1434 unlock_buffer(sbi->s_sbh);
1435
1436 /* Update the free space counts */
1437 percpu_counter_add(&sbi->s_freeclusters_counter,
1438 EXT4_NUM_B2C(sbi, free_blocks));
1439 percpu_counter_add(&sbi->s_freeinodes_counter,
1440 EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1441
1442 ext4_debug("free blocks count %llu",
1443 percpu_counter_read(&sbi->s_freeclusters_counter));
1444 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1445 ext4_group_t flex_group;
1446 struct flex_groups *fg;
1447
1448 flex_group = ext4_flex_group(sbi, group_data[0].group);
1449 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1450 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1451 &fg->free_clusters);
1452 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1453 &fg->free_inodes);
1454 }
1455
1456 /*
1457 * Update the fs overhead information
1458 */
1459 ext4_calculate_overhead(sb);
1460
1461 if (test_opt(sb, DEBUG))
1462 printk(KERN_DEBUG "EXT4-fs: added group %u:"
1463 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
1464 blocks_count, free_blocks, reserved_blocks);
1465}
1466
1467/* Add a flex group to an fs. Ensure we handle all possible error conditions
1468 * _before_ we start modifying the filesystem, because we cannot abort the
1469 * transaction and not have it write the data to disk.
1470 */
1471static int ext4_flex_group_add(struct super_block *sb,
1472 struct inode *resize_inode,
1473 struct ext4_new_flex_group_data *flex_gd)
1474{
1475 struct ext4_sb_info *sbi = EXT4_SB(sb);
1476 struct ext4_super_block *es = sbi->s_es;
1477 ext4_fsblk_t o_blocks_count;
1478 ext4_grpblk_t last;
1479 ext4_group_t group;
1480 handle_t *handle;
1481 unsigned reserved_gdb;
1482 int err = 0, err2 = 0, credit;
1483
1484 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1485
1486 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1487 o_blocks_count = ext4_blocks_count(es);
1488 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1489 BUG_ON(last);
1490
1491 err = setup_new_flex_group_blocks(sb, flex_gd);
1492 if (err)
1493 goto exit;
1494 /*
1495 * We will always be modifying at least the superblock and GDT
1496 * blocks. If we are adding a group past the last current GDT block,
1497 * we will also modify the inode and the dindirect block. If we
1498 * are adding a group with superblock/GDT backups we will also
1499 * modify each of the reserved GDT dindirect blocks.
1500 */
1501 credit = 3; /* sb, resize inode, resize inode dindirect */
1502 /* GDT blocks */
1503 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1504 credit += reserved_gdb; /* Reserved GDT dindirect blocks */
1505 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1506 if (IS_ERR(handle)) {
1507 err = PTR_ERR(handle);
1508 goto exit;
1509 }
1510
1511 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1512 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1513 if (err)
1514 goto exit_journal;
1515
1516 group = flex_gd->groups[0].group;
1517 BUG_ON(group != sbi->s_groups_count);
1518 err = ext4_add_new_descs(handle, sb, group,
1519 resize_inode, flex_gd->count);
1520 if (err)
1521 goto exit_journal;
1522
1523 err = ext4_setup_new_descs(handle, sb, flex_gd);
1524 if (err)
1525 goto exit_journal;
1526
1527 ext4_update_super(sb, flex_gd);
1528
1529 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1530
1531exit_journal:
1532 err2 = ext4_journal_stop(handle);
1533 if (!err)
1534 err = err2;
1535
1536 if (!err) {
1537 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1538 int gdb_num_end = ((group + flex_gd->count - 1) /
1539 EXT4_DESC_PER_BLOCK(sb));
1540 int meta_bg = ext4_has_feature_meta_bg(sb);
1541 sector_t old_gdb = 0;
1542
1543 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1544 sizeof(struct ext4_super_block), 0);
1545 for (; gdb_num <= gdb_num_end; gdb_num++) {
1546 struct buffer_head *gdb_bh;
1547
1548 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1549 gdb_num);
1550 if (old_gdb == gdb_bh->b_blocknr)
1551 continue;
1552 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1553 gdb_bh->b_size, meta_bg);
1554 old_gdb = gdb_bh->b_blocknr;
1555 }
1556 }
1557exit:
1558 return err;
1559}
1560
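/*
 * Fill @flex_gd with the next batch of up to @flexbg_size groups needed to
 * grow the filesystem towards @n_blocks_count, trimming the last group if it
 * would extend past the new size.  Returns 0 when there is nothing left to
 * add, 1 otherwise.
 */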
1561static int ext4_setup_next_flex_gd(struct super_block *sb,
1562 struct ext4_new_flex_group_data *flex_gd,
1563 ext4_fsblk_t n_blocks_count,
1564 unsigned long flexbg_size)
1565{
1566 struct ext4_sb_info *sbi = EXT4_SB(sb);
1567 struct ext4_super_block *es = sbi->s_es;
1568 struct ext4_new_group_data *group_data = flex_gd->groups;
1569 ext4_fsblk_t o_blocks_count;
1570 ext4_group_t n_group;
1571 ext4_group_t group;
1572 ext4_group_t last_group;
1573 ext4_grpblk_t last;
1574 ext4_grpblk_t clusters_per_group;
1575 unsigned long i;
1576
1577 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1578
1579 o_blocks_count = ext4_blocks_count(es);
1580
1581 if (o_blocks_count == n_blocks_count)
1582 return 0;
1583
1584 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1585 BUG_ON(last);
1586 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1587
1588 last_group = group | (flexbg_size - 1);
1589 if (last_group > n_group)
1590 last_group = n_group;
1591
1592 flex_gd->count = last_group - group + 1;
1593
1594 for (i = 0; i < flex_gd->count; i++) {
1595 int overhead;
1596
1597 group_data[i].group = group + i;
1598 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1599 overhead = ext4_group_overhead_blocks(sb, group + i);
1600 group_data[i].mdata_blocks = overhead;
1601 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1602 if (ext4_has_group_desc_csum(sb)) {
1603 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1604 EXT4_BG_INODE_UNINIT;
1605 if (!test_opt(sb, INIT_INODE_TABLE))
1606 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1607 } else
1608 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1609 }
1610
1611 if (last_group == n_group && ext4_has_group_desc_csum(sb))
1612 /* We need to initialize block bitmap of last group. */
1613 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1614
1615 if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1616 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1617 group_data[i - 1].free_clusters_count -= clusters_per_group -
1618 last - 1;
1619 }
1620
1621 return 1;
1622}
1623
1624/* Add group descriptor data to an existing or new group descriptor block.
1625 * Ensure we handle all possible error conditions _before_ we start modifying
1626 * the filesystem, because we cannot abort the transaction and not have it
1627 * write the data to disk.
1628 *
1629 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1630 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1631 *
1632 * We only need to hold the superblock lock while we are actually adding
1633 * in the new group's counts to the superblock. Prior to that we have
1634 * not really "added" the group at all. We re-check that we are still
1635 * adding in the last group in case things have changed since verifying.
1636 */
1637int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1638{
1639 struct ext4_new_flex_group_data flex_gd;
1640 struct ext4_sb_info *sbi = EXT4_SB(sb);
1641 struct ext4_super_block *es = sbi->s_es;
1642 int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1643 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1644 struct inode *inode = NULL;
1645 int gdb_off;
1646 int err;
1647 __u16 bg_flags = 0;
1648
1649 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1650
1651 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1652 ext4_warning(sb, "Can't resize non-sparse filesystem further");
1653 return -EPERM;
1654 }
1655
1656 if (ext4_blocks_count(es) + input->blocks_count <
1657 ext4_blocks_count(es)) {
1658 ext4_warning(sb, "blocks_count overflow");
1659 return -EINVAL;
1660 }
1661
1662 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1663 le32_to_cpu(es->s_inodes_count)) {
1664 ext4_warning(sb, "inodes_count overflow");
1665 return -EINVAL;
1666 }
1667
1668 if (reserved_gdb || gdb_off == 0) {
1669 if (!ext4_has_feature_resize_inode(sb) ||
1670 !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1671 ext4_warning(sb,
1672 "No reserved GDT blocks, can't resize");
1673 return -EPERM;
1674 }
1675 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1676 if (IS_ERR(inode)) {
1677 ext4_warning(sb, "Error opening resize inode");
1678 return PTR_ERR(inode);
1679 }
1680 }
1681
1682
1683 err = verify_group_input(sb, input);
1684 if (err)
1685 goto out;
1686
1687 err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1688 if (err)
1689 goto out;
1690
1691 err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1692 if (err)
1693 goto out;
1694
1695 flex_gd.count = 1;
1696 flex_gd.groups = input;
1697 flex_gd.bg_flags = &bg_flags;
1698 err = ext4_flex_group_add(sb, inode, &flex_gd);
1699out:
1700 iput(inode);
1701 return err;
1702} /* ext4_group_add */
1703
1704/*
1705 * Extend a group without checking, assuming that checking has already been done.
1706 */
1707static int ext4_group_extend_no_check(struct super_block *sb,
1708 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1709{
1710 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1711 handle_t *handle;
1712 int err = 0, err2;
1713
1714 /* We will update the superblock, one block bitmap, and
1715 * one group descriptor via ext4_group_add_blocks().
1716 */
1717 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1718 if (IS_ERR(handle)) {
1719 err = PTR_ERR(handle);
1720 ext4_warning(sb, "error %d on journal start", err);
1721 return err;
1722 }
1723
1724 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1725 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
1726 if (err) {
1727 ext4_warning(sb, "error %d on journal write access", err);
1728 goto errout;
1729 }
1730
1731 lock_buffer(EXT4_SB(sb)->s_sbh);
1732 ext4_blocks_count_set(es, o_blocks_count + add);
1733 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1734 ext4_superblock_csum_set(sb);
1735 unlock_buffer(EXT4_SB(sb)->s_sbh);
1736 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1737 o_blocks_count + add);
1738 /* We add the blocks to the bitmap and set the group need init bit */
1739 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1740 if (err)
1741 goto errout;
1742 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1743 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1744 o_blocks_count + add);
1745errout:
1746 err2 = ext4_journal_stop(handle);
1747 if (err2 && !err)
1748 err = err2;
1749
1750 if (!err) {
1751 if (test_opt(sb, DEBUG))
1752 printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1753 "blocks\n", ext4_blocks_count(es));
1754 update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1755 (char *)es, sizeof(struct ext4_super_block), 0);
1756 }
1757 return err;
1758}
1759
1760/*
1761 * Extend the filesystem to the new number of blocks specified. This entry
1762 * point is only used to extend the current filesystem to the end of the last
1763 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
1764 * for emergencies (because it has no dependencies on reserved blocks).
1765 *
1766 * If we _really_ wanted, we could use default values to call ext4_group_add()
1767 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
1768 * GDT blocks are reserved to grow to the desired size.
1769 */
1770int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1771 ext4_fsblk_t n_blocks_count)
1772{
1773 ext4_fsblk_t o_blocks_count;
1774 ext4_grpblk_t last;
1775 ext4_grpblk_t add;
1776 struct buffer_head *bh;
1777 int err;
1778 ext4_group_t group;
1779
1780 o_blocks_count = ext4_blocks_count(es);
1781
1782 if (test_opt(sb, DEBUG))
1783 ext4_msg(sb, KERN_DEBUG,
1784 "extending last group from %llu to %llu blocks",
1785 o_blocks_count, n_blocks_count);
1786
1787 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1788 return 0;
1789
1790 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1791 ext4_msg(sb, KERN_ERR,
1792 "filesystem too large to resize to %llu blocks safely",
1793 n_blocks_count);
1794 return -EINVAL;
1795 }
1796
1797 if (n_blocks_count < o_blocks_count) {
1798 ext4_warning(sb, "can't shrink FS - resize aborted");
1799 return -EINVAL;
1800 }
1801
1802 /* Handle the remaining blocks in the last group only. */
1803 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1804
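	/* "last" is the offset of the first block past the end of the fs
	 * within its group; zero means the last group is already full, and
	 * growing further would need a new group, which this path cannot add.
	 */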
1805 if (last == 0) {
1806 ext4_warning(sb, "need to use ext2online to resize further");
1807 return -EPERM;
1808 }
1809
1810 add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1811
1812 if (o_blocks_count + add < o_blocks_count) {
1813 ext4_warning(sb, "blocks_count overflow");
1814 return -EINVAL;
1815 }
1816
1817 if (o_blocks_count + add > n_blocks_count)
1818 add = n_blocks_count - o_blocks_count;
1819
1820 if (o_blocks_count + add < n_blocks_count)
1821 ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1822 o_blocks_count + add, add);
1823
1824 /* See if the device is actually as big as what was requested */
1825 bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1826 if (IS_ERR(bh)) {
1827 ext4_warning(sb, "can't read last block, resize aborted");
1828 return -ENOSPC;
1829 }
1830 brelse(bh);
1831
1832 err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1833 return err;
1834} /* ext4_group_extend */
1835
1836
1837static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1838{
1839 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1840}
1841
1842/*
1843 * Release the resize inode and drop the resize_inode feature if there
1844 * are no more reserved gdt blocks, and then convert the file system
1845 * to enable meta_bg
1846 */
1847static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1848{
1849 handle_t *handle;
1850 struct ext4_sb_info *sbi = EXT4_SB(sb);
1851 struct ext4_super_block *es = sbi->s_es;
1852 struct ext4_inode_info *ei = EXT4_I(inode);
1853 ext4_fsblk_t nr;
1854 int i, ret, err = 0;
1855 int credits = 1;
1856
1857 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1858 if (inode) {
1859 if (es->s_reserved_gdt_blocks) {
1860 ext4_error(sb, "Unexpected non-zero "
1861 "s_reserved_gdt_blocks");
1862 return -EPERM;
1863 }
1864
1865 /* Do a quick sanity check of the resize inode */
1866 if (inode->i_blocks != 1 << (inode->i_blkbits -
1867 (9 - sbi->s_cluster_bits)))
1868 goto invalid_resize_inode;
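		/* The double-indirect block must be the only i_data slot in
		 * use; every other slot of the resize inode must be clear.
		 */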
1869 for (i = 0; i < EXT4_N_BLOCKS; i++) {
1870 if (i == EXT4_DIND_BLOCK) {
1871 if (ei->i_data[i])
1872 continue;
1873 else
1874 goto invalid_resize_inode;
1875 }
1876 if (ei->i_data[i])
1877 goto invalid_resize_inode;
1878 }
1879 credits += 3; /* block bitmap, bg descriptor, resize inode */
1880 }
1881
1882 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1883 if (IS_ERR(handle))
1884 return PTR_ERR(handle);
1885
1886 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1887 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1888 if (err)
1889 goto errout;
1890
1891 lock_buffer(sbi->s_sbh);
1892 ext4_clear_feature_resize_inode(sb);
1893 ext4_set_feature_meta_bg(sb);
1894 sbi->s_es->s_first_meta_bg =
1895 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1896 ext4_superblock_csum_set(sb);
1897 unlock_buffer(sbi->s_sbh);
1898
1899 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1900 if (err) {
1901 ext4_std_error(sb, err);
1902 goto errout;
1903 }
1904
1905 if (inode) {
1906 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1907 ext4_free_blocks(handle, inode, NULL, nr, 1,
1908 EXT4_FREE_BLOCKS_METADATA |
1909 EXT4_FREE_BLOCKS_FORGET);
1910 ei->i_data[EXT4_DIND_BLOCK] = 0;
1911 inode->i_blocks = 0;
1912
1913 err = ext4_mark_inode_dirty(handle, inode);
1914 if (err)
1915 ext4_std_error(sb, err);
1916 }
1917
1918errout:
1919 ret = ext4_journal_stop(handle);
1920 if (!err)
1921 err = ret;
1922	return err;
1923
1924invalid_resize_inode:
1925 ext4_error(sb, "corrupted/inconsistent resize inode");
1926 return -EINVAL;
1927}
1928
1929/*
1930 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1931 *
1932 * @sb: super block of the fs to be resized
1933 * @n_blocks_count: the number of blocks in the resized fs
1934 */
1935int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1936{
1937 struct ext4_new_flex_group_data *flex_gd = NULL;
1938 struct ext4_sb_info *sbi = EXT4_SB(sb);
1939 struct ext4_super_block *es = sbi->s_es;
1940 struct buffer_head *bh;
1941 struct inode *resize_inode = NULL;
1942 ext4_grpblk_t add, offset;
1943 unsigned long n_desc_blocks;
1944 unsigned long o_desc_blocks;
1945 ext4_group_t o_group;
1946 ext4_group_t n_group;
1947 ext4_fsblk_t o_blocks_count;
1948 ext4_fsblk_t n_blocks_count_retry = 0;
1949 unsigned long last_update_time = 0;
1950 int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1951 int meta_bg;
1952
1953 /* See if the device is actually as big as what was requested */
1954 bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
1955 if (IS_ERR(bh)) {
1956 ext4_warning(sb, "can't read last block, resize aborted");
1957 return -ENOSPC;
1958 }
1959 brelse(bh);
1960
1961retry:
1962 o_blocks_count = ext4_blocks_count(es);
1963
1964 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1965 "to %llu blocks", o_blocks_count, n_blocks_count);
1966
1967 if (n_blocks_count < o_blocks_count) {
1968 /* On-line shrinking not supported */
1969 ext4_warning(sb, "can't shrink FS - resize aborted");
1970 return -EINVAL;
1971 }
1972
1973 if (n_blocks_count == o_blocks_count)
1974		/* Nothing to do */
1975 return 0;
1976
1977 n_group = ext4_get_group_number(sb, n_blocks_count - 1);
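	/* The on-disk inode count is a 32-bit field; refuse any size whose
	 * total inode count would overflow it.
	 */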
1978 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1979 ext4_warning(sb, "resize would cause inodes_count overflow");
1980 return -EINVAL;
1981 }
1982 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
1983
1984 n_desc_blocks = num_desc_blocks(sb, n_group + 1);
1985 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
1986
1987 meta_bg = ext4_has_feature_meta_bg(sb);
1988
1989 if (ext4_has_feature_resize_inode(sb)) {
1990 if (meta_bg) {
1991 ext4_error(sb, "resize_inode and meta_bg enabled "
1992 "simultaneously");
1993 return -EINVAL;
1994 }
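		/* We can only grow as far as the reserved GDT blocks allow in
		 * this pass; remember the requested size so the resize can be
		 * resumed once this pass has completed.
		 */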
1995 if (n_desc_blocks > o_desc_blocks +
1996 le16_to_cpu(es->s_reserved_gdt_blocks)) {
1997 n_blocks_count_retry = n_blocks_count;
1998 n_desc_blocks = o_desc_blocks +
1999 le16_to_cpu(es->s_reserved_gdt_blocks);
2000 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2001 n_blocks_count = (ext4_fsblk_t)n_group *
2002 EXT4_BLOCKS_PER_GROUP(sb) +
2003 le32_to_cpu(es->s_first_data_block);
2004 n_group--; /* set to last group number */
2005 }
2006
2007 if (!resize_inode)
2008 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2009 EXT4_IGET_SPECIAL);
2010 if (IS_ERR(resize_inode)) {
2011 ext4_warning(sb, "Error opening resize inode");
2012 return PTR_ERR(resize_inode);
2013 }
2014 }
2015
2016 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2017 err = ext4_convert_meta_bg(sb, resize_inode);
2018 if (err)
2019 goto out;
2020 if (resize_inode) {
2021 iput(resize_inode);
2022 resize_inode = NULL;
2023 }
2024 if (n_blocks_count_retry) {
2025 n_blocks_count = n_blocks_count_retry;
2026 n_blocks_count_retry = 0;
2027 goto retry;
2028 }
2029 }
2030
2031 /*
2032 * Make sure the last group has enough space so that it's
2033 * guaranteed to have enough space for all metadata blocks
2034 * that it might need to hold. (We might not need to store
2035 * the inode table blocks in the last block group, but there
2036 * will be cases where this might be needed.)
2037 */
2038 if ((ext4_group_first_block_no(sb, n_group) +
2039 ext4_group_overhead_blocks(sb, n_group) + 2 +
2040 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2041 n_blocks_count = ext4_group_first_block_no(sb, n_group);
2042 n_group--;
2043 n_blocks_count_retry = 0;
2044 if (resize_inode) {
2045 iput(resize_inode);
2046 resize_inode = NULL;
2047 }
2048 goto retry;
2049 }
2050
2051	/* extend the old last group, either to the requested size or to its end */
2052 if (n_group == o_group)
2053 add = n_blocks_count - o_blocks_count;
2054 else
2055 add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2056 if (add > 0) {
2057 err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2058 if (err)
2059 goto out;
2060 }
2061
2062 if (ext4_blocks_count(es) == n_blocks_count)
2063 goto out;
2064
2065 err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2066 if (err)
2067 goto out;
2068
2069 err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2070 if (err)
2071 goto out;
2072
2073 flex_gd = alloc_flex_gd(flexbg_size);
2074 if (flex_gd == NULL) {
2075 err = -ENOMEM;
2076 goto out;
2077 }
2078
2079 /* Add flex groups. Note that a regular group is a
2080 * flex group with 1 group.
2081 */
2082 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2083 flexbg_size)) {
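		/* Log progress at most once every 10 seconds. */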
2084 if (jiffies - last_update_time > HZ * 10) {
2085 if (last_update_time)
2086 ext4_msg(sb, KERN_INFO,
2087 "resized to %llu blocks",
2088 ext4_blocks_count(es));
2089 last_update_time = jiffies;
2090 }
2091 if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2092 break;
2093 err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2094 if (unlikely(err))
2095 break;
2096 }
2097
2098 if (!err && n_blocks_count_retry) {
2099 n_blocks_count = n_blocks_count_retry;
2100 n_blocks_count_retry = 0;
2101 free_flex_gd(flex_gd);
2102 flex_gd = NULL;
2103 if (resize_inode) {
2104 iput(resize_inode);
2105 resize_inode = NULL;
2106 }
2107 goto retry;
2108 }
2109
2110out:
2111 if (flex_gd)
2112 free_flex_gd(flex_gd);
2113 if (resize_inode != NULL)
2114 iput(resize_inode);
2115 if (err)
2116 ext4_warning(sb, "error (%d) occurred during "
2117 "file system resize", err);
2118 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2119 ext4_blocks_count(es));
2120 return err;
2121}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/ext4/resize.c
4 *
5 * Support for resizing an ext4 filesystem while it is mounted.
6 *
7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8 *
9 * This could probably be made into a module, because it is not often in use.
10 */
11
12
13#include <linux/errno.h>
14#include <linux/slab.h>
15#include <linux/jiffies.h>
16
17#include "ext4_jbd2.h"
18
19struct ext4_rcu_ptr {
20 struct rcu_head rcu;
21 void *ptr;
22};
23
24static void ext4_rcu_ptr_callback(struct rcu_head *head)
25{
26 struct ext4_rcu_ptr *ptr;
27
28 ptr = container_of(head, struct ext4_rcu_ptr, rcu);
29 kvfree(ptr->ptr);
30 kfree(ptr);
31}
32
33void ext4_kvfree_array_rcu(void *to_free)
34{
35 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
36
37 if (ptr) {
38 ptr->ptr = to_free;
39 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
40 return;
41 }
42 synchronize_rcu();
43 kvfree(to_free);
44}
45
46int ext4_resize_begin(struct super_block *sb)
47{
48 struct ext4_sb_info *sbi = EXT4_SB(sb);
49 int ret = 0;
50
51 if (!capable(CAP_SYS_RESOURCE))
52 return -EPERM;
53
54 /*
55	 * If the number of reserved GDT blocks is non-zero, the resize_inode
56	 * feature should always be set.
57 */
58 if (sbi->s_es->s_reserved_gdt_blocks &&
59 !ext4_has_feature_resize_inode(sb)) {
60 ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
61 return -EFSCORRUPTED;
62 }
63
64 /*
65 * If we are not using the primary superblock/GDT copy don't resize,
66 * because the user tools have no way of handling this. Probably a
67 * bad time to do it anyways.
68 */
69 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
70 le32_to_cpu(sbi->s_es->s_first_data_block)) {
71 ext4_warning(sb, "won't resize using backup superblock at %llu",
72 (unsigned long long)sbi->s_sbh->b_blocknr);
73 return -EPERM;
74 }
75
76 /*
77 * We are not allowed to do online-resizing on a filesystem mounted
78 * with error, because it can destroy the filesystem easily.
79 */
80 if (sbi->s_mount_state & EXT4_ERROR_FS) {
81 ext4_warning(sb, "There are errors in the filesystem, "
82 "so online resizing is not allowed");
83 return -EPERM;
84 }
85
86 if (ext4_has_feature_sparse_super2(sb)) {
87 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
88 return -EOPNOTSUPP;
89 }
90
91 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
92 &sbi->s_ext4_flags))
93 ret = -EBUSY;
94
95 return ret;
96}
97
98int ext4_resize_end(struct super_block *sb, bool update_backups)
99{
100 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
101 smp_mb__after_atomic();
102 if (update_backups)
103 return ext4_update_overhead(sb, true);
104 return 0;
105}
106
107static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
108 ext4_group_t group) {
109 ext4_grpblk_t overhead;
110 overhead = ext4_bg_num_gdb(sb, group);
111 if (ext4_bg_has_super(sb, group))
112 overhead += 1 +
113 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
114 return overhead;
115}
116
117#define outside(b, first, last) ((b) < (first) || (b) >= (last))
118#define inside(b, first, last) ((b) >= (first) && (b) < (last))
119
120static int verify_group_input(struct super_block *sb,
121 struct ext4_new_group_data *input)
122{
123 struct ext4_sb_info *sbi = EXT4_SB(sb);
124 struct ext4_super_block *es = sbi->s_es;
125 ext4_fsblk_t start = ext4_blocks_count(es);
126 ext4_fsblk_t end = start + input->blocks_count;
127 ext4_group_t group = input->group;
128 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
129 unsigned overhead;
130 ext4_fsblk_t metaend;
131 struct buffer_head *bh = NULL;
132 ext4_grpblk_t free_blocks_count, offset;
133 int err = -EINVAL;
134
135 if (group != sbi->s_groups_count) {
136 ext4_warning(sb, "Cannot add at group %u (only %u groups)",
137 input->group, sbi->s_groups_count);
138 return -EINVAL;
139 }
140
141 overhead = ext4_group_overhead_blocks(sb, group);
142 metaend = start + overhead;
143 free_blocks_count = input->blocks_count - 2 - overhead -
144 sbi->s_itb_per_group;
145 input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);
146
147 if (test_opt(sb, DEBUG))
148 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
149 "(%d free, %u reserved)\n",
150 ext4_bg_has_super(sb, input->group) ? "normal" :
151 "no-super", input->group, input->blocks_count,
152 free_blocks_count, input->reserved_blocks);
153
154 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
155 if (offset != 0)
156 ext4_warning(sb, "Last group not full");
157 else if (input->reserved_blocks > input->blocks_count / 5)
158 ext4_warning(sb, "Reserved blocks too high (%u)",
159 input->reserved_blocks);
160 else if (free_blocks_count < 0)
161 ext4_warning(sb, "Bad blocks count %u",
162 input->blocks_count);
163 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
164 err = PTR_ERR(bh);
165 bh = NULL;
166 ext4_warning(sb, "Cannot read last block (%llu)",
167 end - 1);
168 } else if (outside(input->block_bitmap, start, end))
169 ext4_warning(sb, "Block bitmap not in group (block %llu)",
170 (unsigned long long)input->block_bitmap);
171 else if (outside(input->inode_bitmap, start, end))
172 ext4_warning(sb, "Inode bitmap not in group (block %llu)",
173 (unsigned long long)input->inode_bitmap);
174 else if (outside(input->inode_table, start, end) ||
175 outside(itend - 1, start, end))
176 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
177 (unsigned long long)input->inode_table, itend - 1);
178 else if (input->inode_bitmap == input->block_bitmap)
179 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
180 (unsigned long long)input->block_bitmap);
181 else if (inside(input->block_bitmap, input->inode_table, itend))
182 ext4_warning(sb, "Block bitmap (%llu) in inode table "
183 "(%llu-%llu)",
184 (unsigned long long)input->block_bitmap,
185 (unsigned long long)input->inode_table, itend - 1);
186 else if (inside(input->inode_bitmap, input->inode_table, itend))
187 ext4_warning(sb, "Inode bitmap (%llu) in inode table "
188 "(%llu-%llu)",
189 (unsigned long long)input->inode_bitmap,
190 (unsigned long long)input->inode_table, itend - 1);
191 else if (inside(input->block_bitmap, start, metaend))
192 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
193 (unsigned long long)input->block_bitmap,
194 start, metaend - 1);
195 else if (inside(input->inode_bitmap, start, metaend))
196 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
197 (unsigned long long)input->inode_bitmap,
198 start, metaend - 1);
199 else if (inside(input->inode_table, start, metaend) ||
200 inside(itend - 1, start, metaend))
201 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
202 "(%llu-%llu)",
203 (unsigned long long)input->inode_table,
204 itend - 1, start, metaend - 1);
205 else
206 err = 0;
207 brelse(bh);
208
209 return err;
210}
211
212/*
213 * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
214 * flex group at a time.
215 */
216struct ext4_new_flex_group_data {
217 struct ext4_new_group_data *groups; /* new_group_data for groups
218 in the flex group */
219 __u16 *bg_flags; /* block group flags of groups
220 in @groups */
221 ext4_group_t resize_bg; /* number of allocated
222 new_group_data */
223 ext4_group_t count; /* number of groups in @groups
224 */
225};
226
227/*
228 * Cap the number of groups added at a time to avoid memory allocation failures.
229 */
230#define MAX_RESIZE_BG 16384
231
232/*
233 * alloc_flex_gd() allocates an ext4_new_flex_group_data with size of
234 * @flexbg_size.
235 *
236 * Returns NULL on failure, otherwise the address of the allocated structure.
237 */
238static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
239 ext4_group_t o_group, ext4_group_t n_group)
240{
241 ext4_group_t last_group;
242 struct ext4_new_flex_group_data *flex_gd;
243
244 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
245 if (flex_gd == NULL)
246 goto out3;
247
248 if (unlikely(flexbg_size > MAX_RESIZE_BG))
249 flex_gd->resize_bg = MAX_RESIZE_BG;
250 else
251 flex_gd->resize_bg = flexbg_size;
252
253 /* Avoid allocating large 'groups' array if not needed */
254 last_group = o_group | (flex_gd->resize_bg - 1);
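	/* last group of the resize_bg-aligned chunk that contains o_group */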
255 if (n_group <= last_group)
256 flex_gd->resize_bg = 1 << fls(n_group - o_group + 1);
257 else if (n_group - last_group < flex_gd->resize_bg)
258 flex_gd->resize_bg = 1 << max(fls(last_group - o_group + 1),
259 fls(n_group - last_group));
260
261 flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
262 sizeof(struct ext4_new_group_data),
263 GFP_NOFS);
264 if (flex_gd->groups == NULL)
265 goto out2;
266
267 flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
268 GFP_NOFS);
269 if (flex_gd->bg_flags == NULL)
270 goto out1;
271
272 return flex_gd;
273
274out1:
275 kfree(flex_gd->groups);
276out2:
277 kfree(flex_gd);
278out3:
279 return NULL;
280}
281
282static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
283{
284 kfree(flex_gd->bg_flags);
285 kfree(flex_gd->groups);
286 kfree(flex_gd);
287}
288
289/*
290 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
291 * and inode tables for a flex group.
292 *
293 * This function is used by 64bit-resize. Note that this function allocates
294 * group tables from the 1st group of groups contained by @flexgd, which may
295 * be a partial of a flex group.
296 *
297 * @sb: super block of fs to which the groups belongs
298 *
299 * Returns 0 on a successful allocation of the metadata blocks in the
300 * block group.
301 */
302static int ext4_alloc_group_tables(struct super_block *sb,
303 struct ext4_new_flex_group_data *flex_gd,
304 unsigned int flexbg_size)
305{
306 struct ext4_new_group_data *group_data = flex_gd->groups;
307 ext4_fsblk_t start_blk;
308 ext4_fsblk_t last_blk;
309 ext4_group_t src_group;
310 ext4_group_t bb_index = 0;
311 ext4_group_t ib_index = 0;
312 ext4_group_t it_index = 0;
313 ext4_group_t group;
314 ext4_group_t last_group;
315 unsigned overhead;
316 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
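	/* ANDed into bg_flags to clear BLOCK_UNINIT on groups that will hold
	 * metadata for other groups (only relevant when flex_bg is in use).
	 */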
317 int i;
318
319 BUG_ON(flex_gd->count == 0 || group_data == NULL);
320
321 src_group = group_data[0].group;
322 last_group = src_group + flex_gd->count - 1;
323
324 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
325 (last_group & ~(flexbg_size - 1))));
326next_group:
327 group = group_data[0].group;
328 if (src_group >= group_data[0].group + flex_gd->count)
329 return -ENOSPC;
330 start_blk = ext4_group_first_block_no(sb, src_group);
331 last_blk = start_blk + group_data[src_group - group].blocks_count;
332
333 overhead = ext4_group_overhead_blocks(sb, src_group);
334
335 start_blk += overhead;
336
337	/* Collect as many contiguous blocks as possible. */
338 src_group++;
339 for (; src_group <= last_group; src_group++) {
340 overhead = ext4_group_overhead_blocks(sb, src_group);
341 if (overhead == 0)
342 last_blk += group_data[src_group - group].blocks_count;
343 else
344 break;
345 }
346
347 /* Allocate block bitmaps */
348 for (; bb_index < flex_gd->count; bb_index++) {
349 if (start_blk >= last_blk)
350 goto next_group;
351 group_data[bb_index].block_bitmap = start_blk++;
352 group = ext4_get_group_number(sb, start_blk - 1);
353 group -= group_data[0].group;
354 group_data[group].mdata_blocks++;
355 flex_gd->bg_flags[group] &= uninit_mask;
356 }
357
358 /* Allocate inode bitmaps */
359 for (; ib_index < flex_gd->count; ib_index++) {
360 if (start_blk >= last_blk)
361 goto next_group;
362 group_data[ib_index].inode_bitmap = start_blk++;
363 group = ext4_get_group_number(sb, start_blk - 1);
364 group -= group_data[0].group;
365 group_data[group].mdata_blocks++;
366 flex_gd->bg_flags[group] &= uninit_mask;
367 }
368
369 /* Allocate inode tables */
370 for (; it_index < flex_gd->count; it_index++) {
371 unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
372 ext4_fsblk_t next_group_start;
373
374 if (start_blk + itb > last_blk)
375 goto next_group;
376 group_data[it_index].inode_table = start_blk;
377 group = ext4_get_group_number(sb, start_blk);
378 next_group_start = ext4_group_first_block_no(sb, group + 1);
379 group -= group_data[0].group;
380
381 if (start_blk + itb > next_group_start) {
382 flex_gd->bg_flags[group + 1] &= uninit_mask;
383 overhead = start_blk + itb - next_group_start;
384 group_data[group + 1].mdata_blocks += overhead;
385 itb -= overhead;
386 }
387
388 group_data[group].mdata_blocks += itb;
389 flex_gd->bg_flags[group] &= uninit_mask;
390 start_blk += EXT4_SB(sb)->s_itb_per_group;
391 }
392
393 /* Update free clusters count to exclude metadata blocks */
394 for (i = 0; i < flex_gd->count; i++) {
395 group_data[i].free_clusters_count -=
396 EXT4_NUM_B2C(EXT4_SB(sb),
397 group_data[i].mdata_blocks);
398 }
399
400 if (test_opt(sb, DEBUG)) {
401 int i;
402 group = group_data[0].group;
403
404 printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
405 "%u groups, flexbg size is %u:\n", flex_gd->count,
406 flexbg_size);
407
408 for (i = 0; i < flex_gd->count; i++) {
409 ext4_debug(
410 "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
411 ext4_bg_has_super(sb, group + i) ? "normal" :
412 "no-super", group + i,
413 group_data[i].blocks_count,
414 group_data[i].free_clusters_count,
415 group_data[i].mdata_blocks);
416 }
417 }
418 return 0;
419}
420
421static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
422 ext4_fsblk_t blk)
423{
424 struct buffer_head *bh;
425 int err;
426
427 bh = sb_getblk(sb, blk);
428 if (unlikely(!bh))
429 return ERR_PTR(-ENOMEM);
430 BUFFER_TRACE(bh, "get_write_access");
431 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
432 if (err) {
433 brelse(bh);
434 bh = ERR_PTR(err);
435 } else {
436 memset(bh->b_data, 0, sb->s_blocksize);
437 set_buffer_uptodate(bh);
438 }
439
440 return bh;
441}
442
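/*
 * Make sure @handle has at least @credits journal credits left, extending or
 * restarting the transaction (up to EXT4_MAX_TRANS_DATA credits) if needed.
 */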
443static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
444{
445 return ext4_journal_ensure_credits_fn(handle, credits,
446 EXT4_MAX_TRANS_DATA, 0, 0);
447}
448
449/*
450 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] as used.
451 *
452 * Helper function for setup_new_flex_group_blocks().
453 *
454 * @sb: super block
455 * @handle: journal handle
456 * @flex_gd: flex group data
457 */
458static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
459 struct ext4_new_flex_group_data *flex_gd,
460 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
461{
462 struct ext4_sb_info *sbi = EXT4_SB(sb);
463 ext4_group_t count = last_cluster - first_cluster + 1;
464 ext4_group_t count2;
465
466 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
467 last_cluster);
468 for (; count > 0; count -= count2, first_cluster += count2) {
469 ext4_fsblk_t start;
470 struct buffer_head *bh;
471 ext4_group_t group;
472 int err;
473
474 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
475 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
476 group -= flex_gd->groups[0].group;
477
478 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
479 if (count2 > count)
480 count2 = count;
481
482 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
483 BUG_ON(flex_gd->count > 1);
484 continue;
485 }
486
487 err = ext4_resize_ensure_credits_batch(handle, 1);
488 if (err < 0)
489 return err;
490
491 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
492 if (unlikely(!bh))
493 return -ENOMEM;
494
495 BUFFER_TRACE(bh, "get_write_access");
496 err = ext4_journal_get_write_access(handle, sb, bh,
497 EXT4_JTR_NONE);
498 if (err) {
499 brelse(bh);
500 return err;
501 }
502 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
503 first_cluster, first_cluster - start, count2);
504 mb_set_bits(bh->b_data, first_cluster - start, count2);
505
506 err = ext4_handle_dirty_metadata(handle, NULL, bh);
507 brelse(bh);
508 if (unlikely(err))
509 return err;
510 }
511
512 return 0;
513}
514
515/*
516 * Set up the block and inode bitmaps, and the inode table for the new groups.
517 * This doesn't need to be part of the main transaction, since we are only
518 * changing blocks outside the actual filesystem. We still do journaling to
519 * ensure the recovery is correct in case of a failure just after resize.
520 * If any part of this fails, we simply abort the resize.
521 *
522 * setup_new_flex_group_blocks handles a flex group as follows:
523 * 1. copy super block and GDT, and initialize group tables if necessary.
524 *    In this step, we only set bits in block bitmaps for blocks taken by
525 *    super block and GDT.
526 * 2. allocate group tables in block bitmaps, that is, set bits in block
527 * bitmap for blocks taken by group tables.
528 */
529static int setup_new_flex_group_blocks(struct super_block *sb,
530 struct ext4_new_flex_group_data *flex_gd)
531{
532 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
533 ext4_fsblk_t start;
534 ext4_fsblk_t block;
535 struct ext4_sb_info *sbi = EXT4_SB(sb);
536 struct ext4_super_block *es = sbi->s_es;
537 struct ext4_new_group_data *group_data = flex_gd->groups;
538 __u16 *bg_flags = flex_gd->bg_flags;
539 handle_t *handle;
540 ext4_group_t group, count;
541 struct buffer_head *bh = NULL;
542 int reserved_gdb, i, j, err = 0, err2;
543 int meta_bg;
544
545 BUG_ON(!flex_gd->count || !group_data ||
546 group_data[0].group != sbi->s_groups_count);
547
548 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
549 meta_bg = ext4_has_feature_meta_bg(sb);
550
551 /* This transaction may be extended/restarted along the way */
552 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
553 if (IS_ERR(handle))
554 return PTR_ERR(handle);
555
556 group = group_data[0].group;
557 for (i = 0; i < flex_gd->count; i++, group++) {
558 unsigned long gdblocks;
559 ext4_grpblk_t overhead;
560
561 gdblocks = ext4_bg_num_gdb(sb, group);
562 start = ext4_group_first_block_no(sb, group);
563
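		/* Groups without a backup superblock (and all groups when
		 * meta_bg is enabled) have no sb/GDT copies to write here, so
		 * skip straight to initializing the group tables.
		 */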
564 if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
565 goto handle_itb;
566
567 if (meta_bg == 1)
568 goto handle_itb;
569
570 block = start + ext4_bg_has_super(sb, group);
571 /* Copy all of the GDT blocks into the backup in this group */
572 for (j = 0; j < gdblocks; j++, block++) {
573 struct buffer_head *gdb;
574
575 ext4_debug("update backup group %#04llx\n", block);
576 err = ext4_resize_ensure_credits_batch(handle, 1);
577 if (err < 0)
578 goto out;
579
580 gdb = sb_getblk(sb, block);
581 if (unlikely(!gdb)) {
582 err = -ENOMEM;
583 goto out;
584 }
585
586 BUFFER_TRACE(gdb, "get_write_access");
587 err = ext4_journal_get_write_access(handle, sb, gdb,
588 EXT4_JTR_NONE);
589 if (err) {
590 brelse(gdb);
591 goto out;
592 }
593 memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
594 s_group_desc, j)->b_data, gdb->b_size);
595 set_buffer_uptodate(gdb);
596
597 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
598 if (unlikely(err)) {
599 brelse(gdb);
600 goto out;
601 }
602 brelse(gdb);
603 }
604
605 /* Zero out all of the reserved backup group descriptor
606 * table blocks
607 */
608 if (ext4_bg_has_super(sb, group)) {
609 err = sb_issue_zeroout(sb, gdblocks + start + 1,
610 reserved_gdb, GFP_NOFS);
611 if (err)
612 goto out;
613 }
614
615handle_itb:
616 /* Initialize group tables of the group @group */
617 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
618 goto handle_bb;
619
620 /* Zero out all of the inode table blocks */
621 block = group_data[i].inode_table;
622 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
623 block, sbi->s_itb_per_group);
624 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
625 GFP_NOFS);
626 if (err)
627 goto out;
628
629handle_bb:
630 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
631 goto handle_ib;
632
633 /* Initialize block bitmap of the @group */
634 block = group_data[i].block_bitmap;
635 err = ext4_resize_ensure_credits_batch(handle, 1);
636 if (err < 0)
637 goto out;
638
639 bh = bclean(handle, sb, block);
640 if (IS_ERR(bh)) {
641 err = PTR_ERR(bh);
642 goto out;
643 }
644 overhead = ext4_group_overhead_blocks(sb, group);
645 if (overhead != 0) {
646 ext4_debug("mark backup superblock %#04llx (+0)\n",
647 start);
648 mb_set_bits(bh->b_data, 0,
649 EXT4_NUM_B2C(sbi, overhead));
650 }
651 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
652 sb->s_blocksize * 8, bh->b_data);
653 err = ext4_handle_dirty_metadata(handle, NULL, bh);
654 brelse(bh);
655 if (err)
656 goto out;
657
658handle_ib:
659 if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
660 continue;
661
662 /* Initialize inode bitmap of the @group */
663 block = group_data[i].inode_bitmap;
664 err = ext4_resize_ensure_credits_batch(handle, 1);
665 if (err < 0)
666 goto out;
667 /* Mark unused entries in inode bitmap used */
668 bh = bclean(handle, sb, block);
669 if (IS_ERR(bh)) {
670 err = PTR_ERR(bh);
671 goto out;
672 }
673
674 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
675 sb->s_blocksize * 8, bh->b_data);
676 err = ext4_handle_dirty_metadata(handle, NULL, bh);
677 brelse(bh);
678 if (err)
679 goto out;
680 }
681
682 /* Mark group tables in block bitmap */
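	/* Coalesce contiguous runs of table blocks so that each run needs only
	 * one bitmap update in set_flexbg_block_bitmap().
	 */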
683 for (j = 0; j < GROUP_TABLE_COUNT; j++) {
684 count = group_table_count[j];
685 start = (&group_data[0].block_bitmap)[j];
686 block = start;
687 for (i = 1; i < flex_gd->count; i++) {
688 block += group_table_count[j];
689 if (block == (&group_data[i].block_bitmap)[j]) {
690 count += group_table_count[j];
691 continue;
692 }
693 err = set_flexbg_block_bitmap(sb, handle,
694 flex_gd,
695 EXT4_B2C(sbi, start),
696 EXT4_B2C(sbi,
697 start + count
698 - 1));
699 if (err)
700 goto out;
701 count = group_table_count[j];
702 start = (&group_data[i].block_bitmap)[j];
703 block = start;
704 }
705
706 err = set_flexbg_block_bitmap(sb, handle,
707 flex_gd,
708 EXT4_B2C(sbi, start),
709 EXT4_B2C(sbi,
710 start + count
711 - 1));
712 if (err)
713 goto out;
714 }
715
716out:
717 err2 = ext4_journal_stop(handle);
718 if (err2 && !err)
719 err = err2;
720
721 return err;
722}
723
724/*
725 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
726 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
727 * calling this for the first time. In a sparse filesystem it will be the
728 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
729 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
730 */
731unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
732 unsigned int *five, unsigned int *seven)
733{
734 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
735 unsigned int *min = three;
736 int mult = 3;
737 unsigned int ret;
738
739 if (ext4_has_feature_sparse_super2(sb)) {
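		/* sparse_super2 records at most two backup groups in
		 * es->s_backup_bgs[]; a zero entry means the slot is unused.
		 */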
740 do {
741 if (*min > 2)
742 return UINT_MAX;
743 ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
744 *min += 1;
745 } while (!ret);
746 return ret;
747 }
748
749 if (!ext4_has_feature_sparse_super(sb)) {
750 ret = *min;
751 *min += 1;
752 return ret;
753 }
754
755 if (*five < *min) {
756 min = five;
757 mult = 5;
758 }
759 if (*seven < *min) {
760 min = seven;
761 mult = 7;
762 }
763
764 ret = *min;
765 *min *= mult;
766
767 return ret;
768}
769
770/*
771 * Check that all of the backup GDT blocks are held in the primary GDT block.
772 * It is assumed that they are stored in group order. Returns the number of
773 * groups in the current filesystem that have BACKUPS, or a negative error code.
774 */
775static int verify_reserved_gdb(struct super_block *sb,
776 ext4_group_t end,
777 struct buffer_head *primary)
778{
779 const ext4_fsblk_t blk = primary->b_blocknr;
780 unsigned three = 1;
781 unsigned five = 5;
782 unsigned seven = 7;
783 unsigned grp;
784 __le32 *p = (__le32 *)primary->b_data;
785 int gdbackups = 0;
786
787 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
788 if (le32_to_cpu(*p++) !=
789 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
790 ext4_warning(sb, "reserved GDT %llu"
791 " missing grp %d (%llu)",
792 blk, grp,
793 grp *
794 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
795 blk);
796 return -EINVAL;
797 }
798 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
799 return -EFBIG;
800 }
801
802 return gdbackups;
803}
804
805/*
806 * Called when we need to bring a reserved group descriptor table block into
807 * use from the resize inode. The primary copy of the new GDT block currently
808 * is an indirect block (under the double indirect block in the resize inode).
809 * The new backup GDT blocks will be stored as leaf blocks in this indirect
810 * block, in group order. Even though we know all the block numbers we need,
811 * we check to ensure that the resize inode has actually reserved these blocks.
812 *
813 * Don't need to update the block bitmaps because the blocks are still in use.
814 *
815 * We get all of the error cases out of the way, so that we are sure to not
816 * fail once we start modifying the data on disk, because JBD has no rollback.
817 */
818static int add_new_gdb(handle_t *handle, struct inode *inode,
819 ext4_group_t group)
820{
821 struct super_block *sb = inode->i_sb;
822 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
823 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
824 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
825 struct buffer_head **o_group_desc, **n_group_desc = NULL;
826 struct buffer_head *dind = NULL;
827 struct buffer_head *gdb_bh = NULL;
828 int gdbackups;
829 struct ext4_iloc iloc = { .bh = NULL };
830 __le32 *data;
831 int err;
832
833 if (test_opt(sb, DEBUG))
834 printk(KERN_DEBUG
835 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
836 gdb_num);
837
838 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
839 if (IS_ERR(gdb_bh))
840 return PTR_ERR(gdb_bh);
841
842 gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
843 if (gdbackups < 0) {
844 err = gdbackups;
845 goto errout;
846 }
847
848 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
849 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
850 if (IS_ERR(dind)) {
851 err = PTR_ERR(dind);
852 dind = NULL;
853 goto errout;
854 }
855
856 data = (__le32 *)dind->b_data;
857 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
858 ext4_warning(sb, "new group %u GDT block %llu not reserved",
859 group, gdblock);
860 err = -EINVAL;
861 goto errout;
862 }
863
864 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
865 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
866 EXT4_JTR_NONE);
867 if (unlikely(err))
868 goto errout;
869
870 BUFFER_TRACE(gdb_bh, "get_write_access");
871 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
872 if (unlikely(err))
873 goto errout;
874
875 BUFFER_TRACE(dind, "get_write_access");
876 err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
877 if (unlikely(err)) {
878 ext4_std_error(sb, err);
879 goto errout;
880 }
881
882 /* ext4_reserve_inode_write() gets a reference on the iloc */
883 err = ext4_reserve_inode_write(handle, inode, &iloc);
884 if (unlikely(err))
885 goto errout;
886
887 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
888 GFP_KERNEL);
889 if (!n_group_desc) {
890 err = -ENOMEM;
891 ext4_warning(sb, "not enough memory for %lu groups",
892 gdb_num + 1);
893 goto errout;
894 }
895
896 /*
897 * Finally, we have all of the possible failures behind us...
898 *
899 * Remove new GDT block from inode double-indirect block and clear out
900 * the new GDT block for use (which also "frees" the backup GDT blocks
901 * from the reserved inode). We don't need to change the bitmaps for
902 * these blocks, because they are marked as in-use from being in the
903 * reserved inode, and will become GDT blocks (primary and backup).
904 */
905 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
906 err = ext4_handle_dirty_metadata(handle, NULL, dind);
907 if (unlikely(err)) {
908 ext4_std_error(sb, err);
909 goto errout;
910 }
911 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
912 (9 - EXT4_SB(sb)->s_cluster_bits);
913 ext4_mark_iloc_dirty(handle, inode, &iloc);
914 memset(gdb_bh->b_data, 0, sb->s_blocksize);
915 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
916 if (unlikely(err)) {
917 ext4_std_error(sb, err);
918 iloc.bh = NULL;
919 goto errout;
920 }
921 brelse(dind);
922
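	/* Publish the enlarged descriptor array; readers see either the old
	 * or the new array, and the old one is freed after an RCU grace
	 * period.
	 */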
923 rcu_read_lock();
924 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
925 memcpy(n_group_desc, o_group_desc,
926 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
927 rcu_read_unlock();
928 n_group_desc[gdb_num] = gdb_bh;
929 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
930 EXT4_SB(sb)->s_gdb_count++;
931 ext4_kvfree_array_rcu(o_group_desc);
932
933 lock_buffer(EXT4_SB(sb)->s_sbh);
934 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
935 ext4_superblock_csum_set(sb);
936 unlock_buffer(EXT4_SB(sb)->s_sbh);
937 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
938 if (err)
939 ext4_std_error(sb, err);
940 return err;
941errout:
942 kvfree(n_group_desc);
943 brelse(iloc.bh);
944 brelse(dind);
945 brelse(gdb_bh);
946
947 ext4_debug("leaving with error %d\n", err);
948 return err;
949}
950
951/*
952 * If there is no available space in the existing block group descriptors for
953 * the new block group and there are no reserved block group descriptors, then
954 * the meta_bg feature will get enabled, and es->s_first_meta_bg will get set
955 * to the first block group that is managed using meta_bg; s_first_meta_bg
956 * must be a multiple of EXT4_DESC_PER_BLOCK(sb).
957 * This function is called when the first group of a meta_bg is added, to
958 * bring the new meta_bg's group descriptor block into use.
959 */
960static int add_new_gdb_meta_bg(struct super_block *sb,
961 handle_t *handle, ext4_group_t group) {
962 ext4_fsblk_t gdblock;
963 struct buffer_head *gdb_bh;
964 struct buffer_head **o_group_desc, **n_group_desc;
965 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
966 int err;
967
968 gdblock = ext4_group_first_block_no(sb, group) +
969 ext4_bg_has_super(sb, group);
970 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
971 if (IS_ERR(gdb_bh))
972 return PTR_ERR(gdb_bh);
973 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
974 GFP_KERNEL);
975 if (!n_group_desc) {
976 brelse(gdb_bh);
977 err = -ENOMEM;
978 ext4_warning(sb, "not enough memory for %lu groups",
979 gdb_num + 1);
980 return err;
981 }
982
983 rcu_read_lock();
984 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
985 memcpy(n_group_desc, o_group_desc,
986 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
987 rcu_read_unlock();
988 n_group_desc[gdb_num] = gdb_bh;
989
990 BUFFER_TRACE(gdb_bh, "get_write_access");
991 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
992 if (err) {
993 kvfree(n_group_desc);
994 brelse(gdb_bh);
995 return err;
996 }
997
998 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
999 EXT4_SB(sb)->s_gdb_count++;
1000 ext4_kvfree_array_rcu(o_group_desc);
1001 return err;
1002}
1003
1004/*
1005 * Called when we are adding a new group which has a backup copy of each of
1006 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
1007 * We need to add these reserved backup GDT blocks to the resize inode, so
1008 * that they are kept for future resizing and not allocated to files.
1009 *
1010 * Each reserved backup GDT block will go into a different indirect block.
1011 * The indirect blocks are actually the primary reserved GDT blocks,
1012 * so we know in advance what their block numbers are. We only get the
1013 * double-indirect block to verify it is pointing to the primary reserved
1014 * GDT blocks so we don't overwrite a data block by accident. The reserved
1015 * backup GDT blocks are stored in their reserved primary GDT block.
1016 */
1017static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
1018 ext4_group_t group)
1019{
1020 struct super_block *sb = inode->i_sb;
1021	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
1022 int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
1023 struct buffer_head **primary;
1024 struct buffer_head *dind;
1025 struct ext4_iloc iloc;
1026 ext4_fsblk_t blk;
1027 __le32 *data, *end;
1028 int gdbackups = 0;
1029 int res, i;
1030 int err;
1031
1032 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1033 if (!primary)
1034 return -ENOMEM;
1035
1036 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1037 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1038 if (IS_ERR(dind)) {
1039 err = PTR_ERR(dind);
1040 dind = NULL;
1041 goto exit_free;
1042 }
1043
1044 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1045 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1046 EXT4_ADDR_PER_BLOCK(sb));
1047 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1048
1049 /* Get each reserved primary GDT block and verify it holds backups */
1050 for (res = 0; res < reserved_gdb; res++, blk++) {
1051 if (le32_to_cpu(*data) != blk) {
1052 ext4_warning(sb, "reserved block %llu"
1053 " not at offset %ld",
1054 blk,
1055 (long)(data - (__le32 *)dind->b_data));
1056 err = -EINVAL;
1057 goto exit_bh;
1058 }
1059 primary[res] = ext4_sb_bread(sb, blk, 0);
1060 if (IS_ERR(primary[res])) {
1061 err = PTR_ERR(primary[res]);
1062 primary[res] = NULL;
1063 goto exit_bh;
1064 }
1065 gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1066 if (gdbackups < 0) {
1067 brelse(primary[res]);
1068 err = gdbackups;
1069 goto exit_bh;
1070 }
1071 if (++data >= end)
1072 data = (__le32 *)dind->b_data;
1073 }
1074
1075 for (i = 0; i < reserved_gdb; i++) {
1076 BUFFER_TRACE(primary[i], "get_write_access");
1077 if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
1078 EXT4_JTR_NONE)))
1079 goto exit_bh;
1080 }
1081
1082 if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1083 goto exit_bh;
1084
1085 /*
1086 * Finally we can add each of the reserved backup GDT blocks from
1087 * the new group to its reserved primary GDT block.
1088 */
1089 blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1090 for (i = 0; i < reserved_gdb; i++) {
1091 int err2;
1092 data = (__le32 *)primary[i]->b_data;
1093 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1094 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1095 if (!err)
1096 err = err2;
1097 }
1098
1099 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1100 ext4_mark_iloc_dirty(handle, inode, &iloc);
1101
1102exit_bh:
1103 while (--res >= 0)
1104 brelse(primary[res]);
1105 brelse(dind);
1106
1107exit_free:
1108 kfree(primary);
1109
1110 return err;
1111}
1112
1113static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
1114 ext4_group_t group)
1115{
1116 struct ext4_super_block *es = (struct ext4_super_block *) data;
1117
1118 es->s_block_group_nr = cpu_to_le16(group);
1119 if (ext4_has_metadata_csum(sb))
1120 es->s_checksum = ext4_superblock_csum(sb, es);
1121}
1122
1123/*
1124 * Update the backup copies of the ext4 metadata. These don't need to be part
1125 * of the main resize transaction, because e2fsck will re-write them if there
1126 * is a problem (basically only OOM will cause a problem). However, we
1127 * _should_ update the backups if possible, in case the primary gets trashed
1128 * for some reason and we need to run e2fsck from a backup superblock. The
1129 * important part is that the new block and inode counts are in the backup
1130 * superblocks, and the location of the new group metadata in the GDT backups.
1131 *
1132 * We do not need to take the s_resize_lock for this, because these
1133 * blocks are not otherwise touched by the filesystem code when it is
1134 * mounted. We don't need to worry about last changing from
1135 * sbi->s_groups_count, because the worst that can happen is that we
1136 * do not copy the full number of backups at this time. The resize
1137 * which changed s_groups_count will backup again.
1138 */
1139static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1140 int size, int meta_bg)
1141{
1142 struct ext4_sb_info *sbi = EXT4_SB(sb);
1143 ext4_group_t last;
1144 const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1145 unsigned three = 1;
1146 unsigned five = 5;
1147 unsigned seven = 7;
1148 ext4_group_t group = 0;
1149 int rest = sb->s_blocksize - size;
1150 handle_t *handle;
1151 int err = 0, err2;
1152
1153 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1154 if (IS_ERR(handle)) {
1155 group = 1;
1156 err = PTR_ERR(handle);
1157 goto exit_err;
1158 }
1159
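	/* Without meta_bg, walk the sparse backup groups; with meta_bg, this
	 * descriptor block is backed up only in the second and the last group
	 * of its meta block group.
	 */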
1160 if (meta_bg == 0) {
1161 group = ext4_list_backups(sb, &three, &five, &seven);
1162 last = sbi->s_groups_count;
1163 } else {
1164 group = ext4_get_group_number(sb, blk_off) + 1;
1165 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1166 }
1167
1168 while (group < sbi->s_groups_count) {
1169 struct buffer_head *bh;
1170 ext4_fsblk_t backup_block;
1171 int has_super = ext4_bg_has_super(sb, group);
1172 ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);
1173
1174 /* Out of journal space, and can't get more - abort - so sad */
1175 err = ext4_resize_ensure_credits_batch(handle, 1);
1176 if (err < 0)
1177 break;
1178
1179 if (meta_bg == 0)
1180 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1181 else
1182 backup_block = first_block + has_super;
1183
1184 bh = sb_getblk(sb, backup_block);
1185 if (unlikely(!bh)) {
1186 err = -ENOMEM;
1187 break;
1188 }
1189 ext4_debug("update metadata backup %llu(+%llu)\n",
1190 backup_block, backup_block -
1191 ext4_group_first_block_no(sb, group));
1192 BUFFER_TRACE(bh, "get_write_access");
1193 if ((err = ext4_journal_get_write_access(handle, sb, bh,
1194 EXT4_JTR_NONE))) {
1195 brelse(bh);
1196 break;
1197 }
1198 lock_buffer(bh);
1199 memcpy(bh->b_data, data, size);
1200 if (rest)
1201 memset(bh->b_data + size, 0, rest);
1202 if (has_super && (backup_block == first_block))
1203 ext4_set_block_group_nr(sb, bh->b_data, group);
1204 set_buffer_uptodate(bh);
1205 unlock_buffer(bh);
1206 err = ext4_handle_dirty_metadata(handle, NULL, bh);
1207 if (unlikely(err))
1208 ext4_std_error(sb, err);
1209 brelse(bh);
1210
1211 if (meta_bg == 0)
1212 group = ext4_list_backups(sb, &three, &five, &seven);
1213 else if (group == last)
1214 break;
1215 else
1216 group = last;
1217 }
1218 if ((err2 = ext4_journal_stop(handle)) && !err)
1219 err = err2;
1220
1221 /*
1222 * Ugh! Need to have e2fsck write the backup copies. It is too
1223 * late to revert the resize, we shouldn't fail just because of
1224 * the backup copies (they are only needed in case of corruption).
1225 *
1226 * However, if we got here we have a journal problem too, so we
1227 * can't really start a transaction to mark the superblock.
1228 * Chicken out and just set the flag on the hope it will be written
1229 * to disk, and if not - we will simply wait until next fsck.
1230 */
1231exit_err:
1232 if (err) {
1233 ext4_warning(sb, "can't update backup for group %u (err %d), "
1234 "forcing fsck on next reboot", group, err);
1235 sbi->s_mount_state &= ~EXT4_VALID_FS;
1236 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1237 mark_buffer_dirty(sbi->s_sbh);
1238 }
1239}
1240
1241/*
1242 * ext4_add_new_descs() adds @count group descriptors for the groups
1243 * starting at @group
1244 *
1245 * @handle: journal handle
1246 * @sb: super block
1247 * @group: the group no. of the first group desc to be added
1248 * @resize_inode: the resize inode
1249 * @count: number of group descriptors to be added
1250 */
1251static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1252 ext4_group_t group, struct inode *resize_inode,
1253 ext4_group_t count)
1254{
1255 struct ext4_sb_info *sbi = EXT4_SB(sb);
1256 struct ext4_super_block *es = sbi->s_es;
1257 struct buffer_head *gdb_bh;
1258 int i, gdb_off, gdb_num, err = 0;
1259 int meta_bg;
1260
1261 meta_bg = ext4_has_feature_meta_bg(sb);
1262 for (i = 0; i < count; i++, group++) {
1263 int reserved_gdb = ext4_bg_has_super(sb, group) ?
1264 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1265
1266 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1267 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1268
1269 /*
1270 * We will only either add reserved group blocks to a backup group
1271 * or remove reserved blocks for the first group in a new group block.
1272		 * Doing both would mean more complex code, and sane people don't
1273 * use non-sparse filesystems anymore. This is already checked above.
1274 */
1275 if (gdb_off) {
1276 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1277 gdb_num);
1278 BUFFER_TRACE(gdb_bh, "get_write_access");
1279 err = ext4_journal_get_write_access(handle, sb, gdb_bh,
1280 EXT4_JTR_NONE);
1281
1282 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1283 err = reserve_backup_gdb(handle, resize_inode, group);
1284 } else if (meta_bg != 0) {
1285 err = add_new_gdb_meta_bg(sb, handle, group);
1286 } else {
1287 err = add_new_gdb(handle, resize_inode, group);
1288 }
1289 if (err)
1290 break;
1291 }
1292 return err;
1293}
1294
1295static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1296{
1297 struct buffer_head *bh = sb_getblk(sb, block);
1298 if (unlikely(!bh))
1299 return NULL;
1300 if (!bh_uptodate_or_lock(bh)) {
1301 if (ext4_read_bh(bh, 0, NULL) < 0) {
1302 brelse(bh);
1303 return NULL;
1304 }
1305 }
1306
1307 return bh;
1308}
1309
1310static int ext4_set_bitmap_checksums(struct super_block *sb,
1311 struct ext4_group_desc *gdp,
1312 struct ext4_new_group_data *group_data)
1313{
1314 struct buffer_head *bh;
1315
1316 if (!ext4_has_metadata_csum(sb))
1317 return 0;
1318
1319 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1320 if (!bh)
1321 return -EIO;
1322 ext4_inode_bitmap_csum_set(sb, gdp, bh,
1323 EXT4_INODES_PER_GROUP(sb) / 8);
1324 brelse(bh);
1325
1326 bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1327 if (!bh)
1328 return -EIO;
1329 ext4_block_bitmap_csum_set(sb, gdp, bh);
1330 brelse(bh);
1331
1332 return 0;
1333}
1334
1335/*
1336 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1337 */
1338static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1339 struct ext4_new_flex_group_data *flex_gd)
1340{
1341 struct ext4_new_group_data *group_data = flex_gd->groups;
1342 struct ext4_group_desc *gdp;
1343 struct ext4_sb_info *sbi = EXT4_SB(sb);
1344 struct buffer_head *gdb_bh;
1345 ext4_group_t group;
1346 __u16 *bg_flags = flex_gd->bg_flags;
1347 int i, gdb_off, gdb_num, err = 0;
1348
1349
1350 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1351 group = group_data->group;
1352
1353 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1354 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1355
1356 /*
1357 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
1358 */
1359 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1360 /* Update group descriptor block for new group */
1361 gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1362 gdb_off * EXT4_DESC_SIZE(sb));
1363
1364 memset(gdp, 0, EXT4_DESC_SIZE(sb));
1365 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1366 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1367 err = ext4_set_bitmap_checksums(sb, gdp, group_data);
1368 if (err) {
1369 ext4_std_error(sb, err);
1370 break;
1371 }
1372
1373 ext4_inode_table_set(sb, gdp, group_data->inode_table);
1374 ext4_free_group_clusters_set(sb, gdp,
1375 group_data->free_clusters_count);
1376 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1377 if (ext4_has_group_desc_csum(sb))
1378 ext4_itable_unused_set(sb, gdp,
1379 EXT4_INODES_PER_GROUP(sb));
1380 gdp->bg_flags = cpu_to_le16(*bg_flags);
1381 ext4_group_desc_csum_set(sb, group, gdp);
1382
1383 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1384 if (unlikely(err)) {
1385 ext4_std_error(sb, err);
1386 break;
1387 }
1388
1389 /*
1390 * We can allocate memory for mb_alloc based on the new group
1391 * descriptor
1392 */
1393 err = ext4_mb_add_groupinfo(sb, group, gdp);
1394 if (err)
1395 break;
1396 }
1397 return err;
1398}
1399
1400static void ext4_add_overhead(struct super_block *sb,
1401 const ext4_fsblk_t overhead)
1402{
1403 struct ext4_sb_info *sbi = EXT4_SB(sb);
1404 struct ext4_super_block *es = sbi->s_es;
1405
1406 sbi->s_overhead += overhead;
1407 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
1408 smp_wmb();
1409}
1410
1411/*
1412 * ext4_update_super() updates the super block so that the newly added
1413 * groups can be seen by the filesystem.
1414 *
1415 * @sb: super block
1416 * @flex_gd: new added groups
1417 */
1418static void ext4_update_super(struct super_block *sb,
1419 struct ext4_new_flex_group_data *flex_gd)
1420{
1421 ext4_fsblk_t blocks_count = 0;
1422 ext4_fsblk_t free_blocks = 0;
1423 ext4_fsblk_t reserved_blocks = 0;
1424 struct ext4_new_group_data *group_data = flex_gd->groups;
1425 struct ext4_sb_info *sbi = EXT4_SB(sb);
1426 struct ext4_super_block *es = sbi->s_es;
1427 int i;
1428
1429 BUG_ON(flex_gd->count == 0 || group_data == NULL);
1430 /*
1431 * Make the new blocks and inodes valid next. We do this before
1432 * increasing the group count so that once the group is enabled,
1433 * all of its blocks and inodes are already valid.
1434 *
1435 * We always allocate group-by-group, then block-by-block or
1436 * inode-by-inode within a group, so enabling these
1437 * blocks/inodes before the group is live won't actually let us
1438 * allocate the new space yet.
1439 */
1440 for (i = 0; i < flex_gd->count; i++) {
1441 blocks_count += group_data[i].blocks_count;
1442 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1443 }
1444
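	/* Reserve the same percentage of the newly added blocks as is
	 * currently reserved across the existing filesystem.
	 */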
1445 reserved_blocks = ext4_r_blocks_count(es) * 100;
1446 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1447 reserved_blocks *= blocks_count;
1448 do_div(reserved_blocks, 100);
1449
1450 lock_buffer(sbi->s_sbh);
1451 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1452 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1453 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1454 flex_gd->count);
1455 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1456 flex_gd->count);
1457
1458 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1459 /*
1460 * We need to protect s_groups_count against other CPUs seeing
1461 * inconsistent state in the superblock.
1462 *
1463 * The precise rules we use are:
1464 *
1465 * * Writers must perform a smp_wmb() after updating all
1466 * dependent data and before modifying the groups count
1467 *
1468 * * Readers must perform an smp_rmb() after reading the groups
1469 * count and before reading any dependent data.
1470 *
1471 * NB. These rules can be relaxed when checking the group count
1472 * while freeing data, as we can only allocate from a block
1473 * group after serialising against the group count, and we can
1474 * only then free after serialising in turn against that
1475 * allocation.
1476 */
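	/*
	 * Illustrative reader-side sketch (not code from this function): a
	 * reader following the rules above would do, in order,
	 *
	 *	ngroups = EXT4_SB(sb)->s_groups_count;
	 *	smp_rmb();
	 *	...read per-group data published before the smp_wmb()...
	 *
	 * so it never pairs the new group count with stale group metadata.
	 */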
1477 smp_wmb();
1478
1479 /* Update the global fs size fields */
1480 sbi->s_groups_count += flex_gd->count;
1481 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1482 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1483
1484 /* Update the reserved block counts only once the new group is
1485 * active. */
1486 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1487 reserved_blocks);
1488
1489 /* Update the free space counts */
1490 percpu_counter_add(&sbi->s_freeclusters_counter,
1491 EXT4_NUM_B2C(sbi, free_blocks));
1492 percpu_counter_add(&sbi->s_freeinodes_counter,
1493 EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1494
1495 ext4_debug("free blocks count %llu",
1496 percpu_counter_read(&sbi->s_freeclusters_counter));
1497 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1498 ext4_group_t flex_group;
1499 struct flex_groups *fg;
1500
1501 flex_group = ext4_flex_group(sbi, group_data[0].group);
1502 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1503 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1504 &fg->free_clusters);
1505 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1506 &fg->free_inodes);
1507 }
1508
1509 /*
1510 * Update the fs overhead information.
1511 *
1512 * For bigalloc, if the superblock already has a properly calculated
1513 * overhead, update it with a value based on numbers already computed
1514 * above for the newly allocated capacity.
1515 */
1516 if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
1517 ext4_add_overhead(sb,
1518 EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
1519 else
1520 ext4_calculate_overhead(sb);
1521 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
1522
1523 ext4_superblock_csum_set(sb);
1524 unlock_buffer(sbi->s_sbh);
1525 if (test_opt(sb, DEBUG))
1526 printk(KERN_DEBUG "EXT4-fs: added group %u:"
1527 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
1528 blocks_count, free_blocks, reserved_blocks);
1529}
1530
1531/* Add a flex group to an fs. Ensure we handle all possible error conditions
1532 * _before_ we start modifying the filesystem, because we cannot abort the
1533 * transaction and not have it write the data to disk.
1534 */
1535static int ext4_flex_group_add(struct super_block *sb,
1536 struct inode *resize_inode,
1537 struct ext4_new_flex_group_data *flex_gd)
1538{
1539 struct ext4_sb_info *sbi = EXT4_SB(sb);
1540 struct ext4_super_block *es = sbi->s_es;
1541 ext4_fsblk_t o_blocks_count;
1542 ext4_grpblk_t last;
1543 ext4_group_t group;
1544 handle_t *handle;
1545 unsigned reserved_gdb;
1546 int err = 0, err2 = 0, credit;
1547
1548 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1549
1550 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1551 o_blocks_count = ext4_blocks_count(es);
1552 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1553 BUG_ON(last);
1554
1555 err = setup_new_flex_group_blocks(sb, flex_gd);
1556 if (err)
1557 goto exit;
1558 /*
1559 * We will always be modifying at least the superblock and GDT
1560 * blocks. If we are adding a group past the last current GDT block,
1561 * we will also modify the inode and the dindirect block. If we
1562 * are adding a group with superblock/GDT backups we will also
1563 * modify each of the reserved GDT dindirect blocks.
1564 */
1565 credit = 3; /* sb, resize inode, resize inode dindirect */
1566 /* GDT blocks */
1567 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1568 credit += reserved_gdb; /* Reserved GDT dindirect blocks */
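	/*
	 * Illustrative example (the values are assumptions, not fixed):
	 * adding 16 groups with 64 descriptors per block and 128 reserved
	 * GDT blocks needs 3 + 1 + DIV_ROUND_UP(16, 64) + 128 = 133 credits.
	 */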
1569 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1570 if (IS_ERR(handle)) {
1571 err = PTR_ERR(handle);
1572 goto exit;
1573 }
1574
1575 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1576 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1577 EXT4_JTR_NONE);
1578 if (err)
1579 goto exit_journal;
1580
1581 group = flex_gd->groups[0].group;
1582 BUG_ON(group != sbi->s_groups_count);
1583 err = ext4_add_new_descs(handle, sb, group,
1584 resize_inode, flex_gd->count);
1585 if (err)
1586 goto exit_journal;
1587
1588 err = ext4_setup_new_descs(handle, sb, flex_gd);
1589 if (err)
1590 goto exit_journal;
1591
1592 ext4_update_super(sb, flex_gd);
1593
1594 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1595
1596exit_journal:
1597 err2 = ext4_journal_stop(handle);
1598 if (!err)
1599 err = err2;
1600
1601 if (!err) {
1602 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1603 int gdb_num_end = ((group + flex_gd->count - 1) /
1604 EXT4_DESC_PER_BLOCK(sb));
1605 int meta_bg = ext4_has_feature_meta_bg(sb);
1606 sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
1607 ext4_group_first_block_no(sb, 0);
1608
1609 update_backups(sb, ext4_group_first_block_no(sb, 0),
1610 (char *)es, sizeof(struct ext4_super_block), 0);
1611 for (; gdb_num <= gdb_num_end; gdb_num++) {
1612 struct buffer_head *gdb_bh;
1613
1614 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1615 gdb_num);
1616 update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
1617 gdb_bh->b_data, gdb_bh->b_size, meta_bg);
1618 }
1619 }
1620exit:
1621 return err;
1622}
1623
1624static int ext4_setup_next_flex_gd(struct super_block *sb,
1625 struct ext4_new_flex_group_data *flex_gd,
1626 ext4_fsblk_t n_blocks_count)
1627{
1628 struct ext4_sb_info *sbi = EXT4_SB(sb);
1629 struct ext4_super_block *es = sbi->s_es;
1630 struct ext4_new_group_data *group_data = flex_gd->groups;
1631 ext4_fsblk_t o_blocks_count;
1632 ext4_group_t n_group;
1633 ext4_group_t group;
1634 ext4_group_t last_group;
1635 ext4_grpblk_t last;
1636 ext4_grpblk_t clusters_per_group;
1637 unsigned long i;
1638
1639 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1640
1641 o_blocks_count = ext4_blocks_count(es);
1642
1643 if (o_blocks_count == n_blocks_count)
1644 return 0;
1645
1646 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1647 BUG_ON(last);
1648 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1649
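	/*
	 * flex_gd->resize_bg is a power of two (it is derived from the
	 * flex-bg size), so OR-ing in (resize_bg - 1) rounds group up to the
	 * last group of the current resize chunk.
	 */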
1650 last_group = group | (flex_gd->resize_bg - 1);
1651 if (last_group > n_group)
1652 last_group = n_group;
1653
1654 flex_gd->count = last_group - group + 1;
1655
1656 for (i = 0; i < flex_gd->count; i++) {
1657 int overhead;
1658
1659 group_data[i].group = group + i;
1660 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1661 overhead = ext4_group_overhead_blocks(sb, group + i);
1662 group_data[i].mdata_blocks = overhead;
1663 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1664 if (ext4_has_group_desc_csum(sb)) {
1665 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1666 EXT4_BG_INODE_UNINIT;
1667 if (!test_opt(sb, INIT_INODE_TABLE))
1668 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1669 } else
1670 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1671 }
1672
1673 if (last_group == n_group && ext4_has_group_desc_csum(sb))
1674 /* We need to initialize the block bitmap of the last group. */
1675 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1676
1677 if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1678 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1679 group_data[i - 1].free_clusters_count -= clusters_per_group -
1680 last - 1;
1681 }
1682
1683 return 1;
1684}
1685
1686/* Add group descriptor data to an existing or new group descriptor block.
1687 * Ensure we handle all possible error conditions _before_ we start modifying
1688 * the filesystem, because we cannot abort the transaction and not have it
1689 * write the data to disk.
1690 *
1691 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1692 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1693 *
1694 * We only need to hold the superblock lock while we are actually adding
1695 * in the new group's counts to the superblock. Prior to that we have
1696 * not really "added" the group at all. We re-check that we are still
1697 * adding in the last group in case things have changed since verifying.
1698 */
1699int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1700{
1701 struct ext4_new_flex_group_data flex_gd;
1702 struct ext4_sb_info *sbi = EXT4_SB(sb);
1703 struct ext4_super_block *es = sbi->s_es;
1704 int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1705 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1706 struct inode *inode = NULL;
1707 int gdb_off;
1708 int err;
1709 __u16 bg_flags = 0;
1710
1711 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1712
1713 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1714 ext4_warning(sb, "Can't resize non-sparse filesystem further");
1715 return -EPERM;
1716 }
1717
1718 if (ext4_blocks_count(es) + input->blocks_count <
1719 ext4_blocks_count(es)) {
1720 ext4_warning(sb, "blocks_count overflow");
1721 return -EINVAL;
1722 }
1723
1724 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1725 le32_to_cpu(es->s_inodes_count)) {
1726 ext4_warning(sb, "inodes_count overflow");
1727 return -EINVAL;
1728 }
1729
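	/*
	 * We need the reserved GDT blocks (and hence the resize inode) if the
	 * new group carries superblock/GDT backups (reserved_gdb != 0) or if
	 * it starts a new group descriptor block (gdb_off == 0).
	 */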
1730 if (reserved_gdb || gdb_off == 0) {
1731 if (!ext4_has_feature_resize_inode(sb) ||
1732 !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1733 ext4_warning(sb,
1734 "No reserved GDT blocks, can't resize");
1735 return -EPERM;
1736 }
1737 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1738 if (IS_ERR(inode)) {
1739 ext4_warning(sb, "Error opening resize inode");
1740 return PTR_ERR(inode);
1741 }
1742 }
1743
1744
1745 err = verify_group_input(sb, input);
1746 if (err)
1747 goto out;
1748
1749 err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1750 if (err)
1751 goto out;
1752
1753 err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1754 if (err)
1755 goto out;
1756
1757 flex_gd.count = 1;
1758 flex_gd.groups = input;
1759 flex_gd.bg_flags = &bg_flags;
1760 err = ext4_flex_group_add(sb, inode, &flex_gd);
1761out:
1762 iput(inode);
1763 return err;
1764} /* ext4_group_add */
1765
1766/*
1767 * Extend a group without checking, assuming that checking has already been done.
1768 */
1769static int ext4_group_extend_no_check(struct super_block *sb,
1770 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1771{
1772 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1773 handle_t *handle;
1774 int err = 0, err2;
1775
1776 /* We will update the superblock, one block bitmap, and
1777 * one group descriptor via ext4_group_add_blocks().
1778 */
1779 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1780 if (IS_ERR(handle)) {
1781 err = PTR_ERR(handle);
1782 ext4_warning(sb, "error %d on journal start", err);
1783 return err;
1784 }
1785
1786 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1787 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
1788 EXT4_JTR_NONE);
1789 if (err) {
1790 ext4_warning(sb, "error %d on journal write access", err);
1791 goto errout;
1792 }
1793
1794 lock_buffer(EXT4_SB(sb)->s_sbh);
1795 ext4_blocks_count_set(es, o_blocks_count + add);
1796 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1797 ext4_superblock_csum_set(sb);
1798 unlock_buffer(EXT4_SB(sb)->s_sbh);
1799 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1800 o_blocks_count + add);
1801 /* Add the blocks to the bitmap and set the group's need-init bit */
1802 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1803 if (err)
1804 goto errout;
1805 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1806 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1807 o_blocks_count + add);
1808errout:
1809 err2 = ext4_journal_stop(handle);
1810 if (err2 && !err)
1811 err = err2;
1812
1813 if (!err) {
1814 if (test_opt(sb, DEBUG))
1815 printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1816 "blocks\n", ext4_blocks_count(es));
1817 update_backups(sb, ext4_group_first_block_no(sb, 0),
1818 (char *)es, sizeof(struct ext4_super_block), 0);
1819 }
1820 return err;
1821}
1822
1823/*
1824 * Extend the filesystem to the new number of blocks specified. This entry
1825 * point is only used to extend the current filesystem to the end of the last
1826 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
1827 * for emergencies (because it has no dependencies on reserved blocks).
1828 *
1829 * If we _really_ wanted, we could use default values to call ext4_group_add(),
1830 * allowing the "remount" trick to work for arbitrary resizing, assuming enough
1831 * GDT blocks are reserved to grow to the desired size.
1832 */
1833int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1834 ext4_fsblk_t n_blocks_count)
1835{
1836 ext4_fsblk_t o_blocks_count;
1837 ext4_grpblk_t last;
1838 ext4_grpblk_t add;
1839 struct buffer_head *bh;
1840 ext4_group_t group;
1841
1842 o_blocks_count = ext4_blocks_count(es);
1843
1844 if (test_opt(sb, DEBUG))
1845 ext4_msg(sb, KERN_DEBUG,
1846 "extending last group from %llu to %llu blocks",
1847 o_blocks_count, n_blocks_count);
1848
1849 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1850 return 0;
1851
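	/*
	 * Refuse a size whose 512-byte sector count (n_blocks_count shifted
	 * up by blocksize_bits - 9) would overflow sector_t.
	 */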
1852 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1853 ext4_msg(sb, KERN_ERR,
1854 "filesystem too large to resize to %llu blocks safely",
1855 n_blocks_count);
1856 return -EINVAL;
1857 }
1858
1859 if (n_blocks_count < o_blocks_count) {
1860 ext4_warning(sb, "can't shrink FS - resize aborted");
1861 return -EINVAL;
1862 }
1863
1864 /* Handle the remaining blocks in the last group only. */
1865 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1866
1867 if (last == 0) {
1868 ext4_warning(sb, "need to use ext2online to resize further");
1869 return -EPERM;
1870 }
1871
1872 add = EXT4_BLOCKS_PER_GROUP(sb) - last;
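	/*
	 * Illustrative example: with 32768 blocks per group and o_blocks_count
	 * ending 100 blocks into the last group (last == 100), add is 32668;
	 * it is capped below so we never grow past n_blocks_count.
	 */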
1873
1874 if (o_blocks_count + add < o_blocks_count) {
1875 ext4_warning(sb, "blocks_count overflow");
1876 return -EINVAL;
1877 }
1878
1879 if (o_blocks_count + add > n_blocks_count)
1880 add = n_blocks_count - o_blocks_count;
1881
1882 if (o_blocks_count + add < n_blocks_count)
1883 ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1884 o_blocks_count + add, add);
1885
1886 /* See if the device is actually as big as what was requested */
1887 bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1888 if (IS_ERR(bh)) {
1889 ext4_warning(sb, "can't read last block, resize aborted");
1890 return -ENOSPC;
1891 }
1892 brelse(bh);
1893
1894 return ext4_group_extend_no_check(sb, o_blocks_count, add);
1895} /* ext4_group_extend */
1896
1897
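/*
 * Number of group descriptor blocks needed to describe @groups block groups,
 * i.e. DIV_ROUND_UP(groups, EXT4_DESC_PER_BLOCK(sb)).
 */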
1898static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1899{
1900 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1901}
1902
1903/*
1904 * Release the resize inode and drop the resize_inode feature if there
1905 * are no more reserved GDT blocks, and then convert the file system
1906 * to enable meta_bg.
1907 */
1908static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1909{
1910 handle_t *handle;
1911 struct ext4_sb_info *sbi = EXT4_SB(sb);
1912 struct ext4_super_block *es = sbi->s_es;
1913 struct ext4_inode_info *ei = EXT4_I(inode);
1914 ext4_fsblk_t nr;
1915 int i, ret, err = 0;
1916 int credits = 1;
1917
1918 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1919 if (inode) {
1920 if (es->s_reserved_gdt_blocks) {
1921 ext4_error(sb, "Unexpected non-zero "
1922 "s_reserved_gdt_blocks");
1923 return -EPERM;
1924 }
1925
1926 /* Do a quick sanity check of the resize inode */
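		/*
		 * i_blocks is counted in 512-byte units, so a resize inode
		 * that owns exactly one cluster (its double-indirect block)
		 * has i_blocks == 1 << (i_blkbits - 9 + s_cluster_bits).
		 */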
1927 if (inode->i_blocks != 1 << (inode->i_blkbits -
1928 (9 - sbi->s_cluster_bits)))
1929 goto invalid_resize_inode;
1930 for (i = 0; i < EXT4_N_BLOCKS; i++) {
1931 if (i == EXT4_DIND_BLOCK) {
1932 if (ei->i_data[i])
1933 continue;
1934 else
1935 goto invalid_resize_inode;
1936 }
1937 if (ei->i_data[i])
1938 goto invalid_resize_inode;
1939 }
1940 credits += 3; /* block bitmap, bg descriptor, resize inode */
1941 }
1942
1943 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1944 if (IS_ERR(handle))
1945 return PTR_ERR(handle);
1946
1947 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1948 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1949 EXT4_JTR_NONE);
1950 if (err)
1951 goto errout;
1952
1953 lock_buffer(sbi->s_sbh);
1954 ext4_clear_feature_resize_inode(sb);
1955 ext4_set_feature_meta_bg(sb);
1956 sbi->s_es->s_first_meta_bg =
1957 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1958 ext4_superblock_csum_set(sb);
1959 unlock_buffer(sbi->s_sbh);
1960
1961 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1962 if (err) {
1963 ext4_std_error(sb, err);
1964 goto errout;
1965 }
1966
1967 if (inode) {
1968 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1969 ext4_free_blocks(handle, inode, NULL, nr, 1,
1970 EXT4_FREE_BLOCKS_METADATA |
1971 EXT4_FREE_BLOCKS_FORGET);
1972 ei->i_data[EXT4_DIND_BLOCK] = 0;
1973 inode->i_blocks = 0;
1974
1975 err = ext4_mark_inode_dirty(handle, inode);
1976 if (err)
1977 ext4_std_error(sb, err);
1978 }
1979
1980errout:
1981 ret = ext4_journal_stop(handle);
1982 return err ? err : ret;
1983
1984invalid_resize_inode:
1985 ext4_error(sb, "corrupted/inconsistent resize inode");
1986 return -EINVAL;
1987}
1988
1989/*
1990 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1991 *
1992 * @sb: super block of the fs to be resized
1993 * @n_blocks_count: the number of blocks in the resized fs
1994 */
1995int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1996{
1997 struct ext4_new_flex_group_data *flex_gd = NULL;
1998 struct ext4_sb_info *sbi = EXT4_SB(sb);
1999 struct ext4_super_block *es = sbi->s_es;
2000 struct buffer_head *bh;
2001 struct inode *resize_inode = NULL;
2002 ext4_grpblk_t add, offset;
2003 unsigned long n_desc_blocks;
2004 unsigned long o_desc_blocks;
2005 ext4_group_t o_group;
2006 ext4_group_t n_group;
2007 ext4_fsblk_t o_blocks_count;
2008 ext4_fsblk_t n_blocks_count_retry = 0;
2009 unsigned long last_update_time = 0;
2010 int err = 0;
2011 int meta_bg;
2012 unsigned int flexbg_size = ext4_flex_bg_size(sbi);
2013
2014 /* See if the device is actually as big as what was requested */
2015 bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
2016 if (IS_ERR(bh)) {
2017 ext4_warning(sb, "can't read last block, resize aborted");
2018 return -ENOSPC;
2019 }
2020 brelse(bh);
2021
2022 /*
2023 * For bigalloc, trim the requested size to the nearest cluster
2024 * boundary to avoid creating an unusable filesystem. We do this
2025 * silently, instead of returning an error, to avoid breaking
2026 * callers that blindly resize the filesystem to the full size of
2027 * the underlying block device.
2028 */
2029 if (ext4_has_feature_bigalloc(sb))
2030 n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
2031
2032retry:
2033 o_blocks_count = ext4_blocks_count(es);
2034
2035 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
2036 "to %llu blocks", o_blocks_count, n_blocks_count);
2037
2038 if (n_blocks_count < o_blocks_count) {
2039 /* On-line shrinking not supported */
2040 ext4_warning(sb, "can't shrink FS - resize aborted");
2041 return -EINVAL;
2042 }
2043
2044 if (n_blocks_count == o_blocks_count)
2045 /* Nothing to do */
2046 return 0;
2047
2048 n_group = ext4_get_group_number(sb, n_blocks_count - 1);
2049 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
2050 ext4_warning(sb, "resize would cause inodes_count overflow");
2051 return -EINVAL;
2052 }
2053 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2054
2055 n_desc_blocks = num_desc_blocks(sb, n_group + 1);
2056 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
2057
2058 meta_bg = ext4_has_feature_meta_bg(sb);
2059
2060 if (ext4_has_feature_resize_inode(sb)) {
2061 if (meta_bg) {
2062 ext4_error(sb, "resize_inode and meta_bg enabled "
2063 "simultaneously");
2064 return -EINVAL;
2065 }
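		/*
		 * If the target size needs more descriptor blocks than the
		 * reserved GDT blocks can provide, grow only as far as the
		 * resize inode allows now and remember the full request in
		 * n_blocks_count_retry; the remainder is completed on a later
		 * pass, after the filesystem has been converted to meta_bg.
		 */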
2066 if (n_desc_blocks > o_desc_blocks +
2067 le16_to_cpu(es->s_reserved_gdt_blocks)) {
2068 n_blocks_count_retry = n_blocks_count;
2069 n_desc_blocks = o_desc_blocks +
2070 le16_to_cpu(es->s_reserved_gdt_blocks);
2071 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2072 n_blocks_count = (ext4_fsblk_t)n_group *
2073 EXT4_BLOCKS_PER_GROUP(sb) +
2074 le32_to_cpu(es->s_first_data_block);
2075 n_group--; /* set to last group number */
2076 }
2077
2078 if (!resize_inode)
2079 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2080 EXT4_IGET_SPECIAL);
2081 if (IS_ERR(resize_inode)) {
2082 ext4_warning(sb, "Error opening resize inode");
2083 return PTR_ERR(resize_inode);
2084 }
2085 }
2086
2087 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2088 err = ext4_convert_meta_bg(sb, resize_inode);
2089 if (err)
2090 goto out;
2091 if (resize_inode) {
2092 iput(resize_inode);
2093 resize_inode = NULL;
2094 }
2095 if (n_blocks_count_retry) {
2096 n_blocks_count = n_blocks_count_retry;
2097 n_blocks_count_retry = 0;
2098 goto retry;
2099 }
2100 }
2101
2102 /*
2103 * Make sure the last group has enough space so that it's
2104 * guaranteed to have enough space for all metadata blocks
2105 * that it might need to hold. (We might not need to store
2106 * the inode table blocks in the last block group, but there
2107 * will be cases where this might be needed.)
2108 */
2109 if ((ext4_group_first_block_no(sb, n_group) +
2110 ext4_group_overhead_blocks(sb, n_group) + 2 +
2111 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2112 n_blocks_count = ext4_group_first_block_no(sb, n_group);
2113 n_group--;
2114 n_blocks_count_retry = 0;
2115 if (resize_inode) {
2116 iput(resize_inode);
2117 resize_inode = NULL;
2118 }
2119 goto retry;
2120 }
2121
2122 /* extend the last group */
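	/*
	 * If the new size still ends in the current last group, add only the
	 * requested blocks; otherwise fill the current last group to its end
	 * and let the flex-group loop below add the remaining groups.
	 */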
2123 if (n_group == o_group)
2124 add = n_blocks_count - o_blocks_count;
2125 else
2126 add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2127 if (add > 0) {
2128 err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2129 if (err)
2130 goto out;
2131 }
2132
2133 if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
2134 goto out;
2135
2136 err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2137 if (err)
2138 goto out;
2139
2140 err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2141 if (err)
2142 goto out;
2143
2144 flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
2145 if (flex_gd == NULL) {
2146 err = -ENOMEM;
2147 goto out;
2148 }
2149
2150 /* Add flex groups. Note that a regular group is a
2151 * flex group with 1 group.
2152 */
2153 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
2154 if (time_is_before_jiffies(last_update_time + HZ * 10)) {
2155 if (last_update_time)
2156 ext4_msg(sb, KERN_INFO,
2157 "resized to %llu blocks",
2158 ext4_blocks_count(es));
2159 last_update_time = jiffies;
2160 }
2161 if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2162 break;
2163 err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2164 if (unlikely(err))
2165 break;
2166 }
2167
2168 if (!err && n_blocks_count_retry) {
2169 n_blocks_count = n_blocks_count_retry;
2170 n_blocks_count_retry = 0;
2171 free_flex_gd(flex_gd);
2172 flex_gd = NULL;
2173 if (resize_inode) {
2174 iput(resize_inode);
2175 resize_inode = NULL;
2176 }
2177 goto retry;
2178 }
2179
2180out:
2181 if (flex_gd)
2182 free_flex_gd(flex_gd);
2183 if (resize_inode != NULL)
2184 iput(resize_inode);
2185 if (err)
2186 ext4_warning(sb, "error (%d) occurred during "
2187 "file system resize", err);
2188 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2189 ext4_blocks_count(es));
2190 return err;
2191}