// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"

/*
 * Check whether any new backup superblocks exist in the last
 * group.  If there are some, set or clear their bits in the
 * group bitmap.
 *
 * Return how many backups we find in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

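	/*
	 * Walk every possible backup superblock location.  Only the
	 * backups that land in this last group, beyond its old end,
	 * are new ones whose bits need to be set or cleared.
	 */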
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* skip backups that already fell inside the old group size */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}

static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	u16 old_bg_clusters;
	u16 contig_bits;
	__le16 old_bg_contig_free_bits;

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

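	/* how many clusters the last group covered before this extend */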
	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * Check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						      group,
						      cl_cpg, old_bg_clusters, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

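	/*
	 * bg_bits changed, so recompute the largest run of contiguous
	 * free bits in the group.  Remember the old value in case the
	 * inode update below fails and we have to roll back.
	 */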
	contig_bits = ocfs2_find_max_contig_free_bits(group->bg_bitmap,
					le16_to_cpu(group->bg_bits), 0);
	old_bg_contig_free_bits = group->bg_contig_free_bits;
	group->bg_contig_free_bits = cpu_to_le16(contig_bits);

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
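	/* revert the group descriptor changes made above */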
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    cl_cpg, old_bg_clusters, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
		group->bg_contig_free_bits = old_bg_contig_free_bits;
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}

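/*
 * Copy the updated superblock image in @data into every backup
 * superblock location that lies within the new volume size.
 */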
static int update_backups(struct inode *inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster >= clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}

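/*
 * Bump i_clusters in the on-disk superblock and then propagate the
 * new image to the backup superblocks.  A failure here is not fatal;
 * we only warn and recommend a run of fsck.ocfs2.
 */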
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}

/*
 * Extend the filesystem to the new number of clusters specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode *inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

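	/*
	 * Online resize requires the group bitmap to fill a whole
	 * descriptor block (cl_cpg equals the bitmap size in bits);
	 * older, smaller layouts can only be resized offline.
	 */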
	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
				 ocfs2_group_bitmap_size(osb->sb, 0,
							 osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

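	/* the requested extension must fit inside the last cluster group */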
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
		le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}

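/*
 * Verify that the group descriptor already written at @input->group
 * on disk is consistent with the rest of @input before it is linked
 * into the global bitmap.
 */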
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}

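/*
 * Validate the ocfs2_new_group_input from userspace against the
 * current global bitmap: the new group must start past the current
 * volume, land on a proper group boundary, fit within one group, and
 * be added to a legal chain.
 */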
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;

	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}

/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	u64 bg_ptr;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
				 ocfs2_group_bitmap_size(osb->sb, 0,
							 osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			      input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

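	/*
	 * Link the new group at the head of its chain.  Save the old
	 * next pointer so it can be restored if journal access to the
	 * bitmap inode fails below.
	 */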
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	bg_ptr = le64_to_cpu(group->bg_next_group);
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		group->bg_next_group = cpu_to_le64(bg_ptr);
		mlog_errno(ret);
		goto out_commit;
	}

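	/*
	 * If this group starts a brand new chain, advance
	 * cl_next_free_rec and initialize the chain record.
	 */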
	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_free_group_bh:
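	/*
	 * On failure the group was never linked in; drop its buffer
	 * from the uptodate cache so a later attempt re-reads it from
	 * disk.
	 */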
	if (ret < 0)
		ocfs2_remove_from_cache(INODE_CACHE(inode), group_bh);
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}