// SPDX-License-Identifier: GPL-2.0-only
/*
 * move_extents.c
 *
 * Copyright (C) 2011 Oracle. All rights reserved.
 */
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_ioctl.h"

#include "alloc.h"
#include "localalloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"
#include "move_extents.h"

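/*
 * Per-call state for one OCFS2_IOC_MOVE_EXT request: the target inode
 * and file, the userspace request ('range'), the allocators and
 * dealloc context used while relocating clusters, and running totals
 * ('clusters_moved', 'new_phys_cpos') that are reported back to
 * userspace when the ioctl returns.
 */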
struct ocfs2_move_extents_context {
	struct inode *inode;
	struct file *file;
	int auto_defrag;
	int partial;
	int credits;
	u32 new_phys_cpos;
	u32 clusters_moved;
	u64 refcount_loc;
	struct ocfs2_move_extents *range;
	struct ocfs2_extent_tree et;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};

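/*
 * Relocate one extent to 'new_p_cpos': copy the data clusters through
 * the page cache (ocfs2_duplicate_clusters_by_page), rewrite the
 * extent record in place via ocfs2_split_extent() so that it points
 * at the new physical clusters, then release the old clusters either
 * through the refcount tree or the truncate log.
 */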
static int __ocfs2_move_extent(handle_t *handle,
			       struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
			       int ext_flags)
{
	int ret = 0, index;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_rec *rec, replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);

	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
					       p_cpos, new_p_cpos, len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
								   new_p_cpos));

	path = ocfs2_new_path_from_et(&context->et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1) {
		ret = ocfs2_error(inode->i_sb,
				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
				  (unsigned long long)ino, cpos);
		goto out;
	}

	rec = &el->l_recs[index];

	BUG_ON(ext_flags != rec->e_flags);
	/*
	 * After moving/defragging to the new location, the extent is
	 * no longer refcounted.
	 */
	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				      context->et.et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_split_extent(handle, &context->et, path, index,
				 &replace_rec, context->meta_ac,
				 &context->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, context->et.et_root_bh);

	context->new_phys_cpos = new_p_cpos;

	/*
	 * Do we need to append the old clusters to the truncate log?
	 */
	if (old_blkno) {
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(osb->sb,
								 old_blkno),
					len, context->meta_ac,
					&context->dealloc, 1);
		else
			ret = ocfs2_truncate_log_append(osb, handle,
							old_blkno, len);
	}

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
out:
	ocfs2_free_path(path);
	return ret;
}

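/*
 * A note on the reservation math in the helper below: the worst-case
 * sketch behind 'max_recs_needed' is that every cluster being moved
 * may end up in an extent record of its own, and every record split
 * may add up to two more records. It is only an upper bound used to
 * decide whether the extent tree might have to grow.
 */
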
/*
 * Lock the meta allocator and reserve an appropriate number of bits
 * for metadata blocks, accumulating the journal credits needed to
 * extend the tree.
 */
static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
					struct ocfs2_extent_tree *et,
					u32 clusters_to_move,
					u32 extents_to_split,
					struct ocfs2_alloc_context **meta_ac,
					int extra_blocks,
					int *credits)
{
	int ret, num_free_extents;
	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	num_free_extents = ocfs2_num_free_extents(et);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);

	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);

	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
	     extra_blocks, clusters_to_move, *credits);
out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}

/*
 * Use a single journal handle so that the data stays consistent no
 * matter where a crash happens.
 *
 * XXX: defrag can end up moving only part of the requested extent
 * when not enough contiguous clusters can be found in the allocator.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 new_phys_cpos, new_len;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
	int need_free = 0;

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
		BUG_ON(!ocfs2_is_refcount_inode(inode));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							*len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
						     *len, 1,
						     &context->meta_ac,
						     extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * XXX: should we be using the allocation reservation strategy
	 * here?
	 *
	 * if (context->data_ac)
	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
	 */

	inode_lock(tl_inode);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock_mutex;
		}
	}

	/*
	 * Make sure ocfs2_reserve_clusters() is called after
	 * __ocfs2_flush_truncate_log(); reserving clusters before the
	 * flush may deadlock on the global bitmap.
	 */
	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
				     &new_phys_cpos, &new_len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Allowing partial extent moves has pros and cons: it makes the
	 * whole defragmentation less likely to fail, but it may leave
	 * the fs even more fragmented afterwards. Let userspace make
	 * the decision here.
	 */
	if (new_len != *len) {
		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
		if (!partial) {
			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
			ret = -ENOSPC;
			need_free = 1;
			goto out_commit;
		}
	}

	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
	     phys_cpos, new_phys_cpos);

	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
				  new_phys_cpos, ext_flags);
	if (ret)
		mlog_errno(ret);

	if (partial && (new_len != *len))
		*len = new_len;

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
	if (ret)
		mlog_errno(ret);

out_commit:
	if (need_free && context->data_ac) {
		struct ocfs2_alloc_context *data_ac = context->data_ac;

		if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
					new_phys_cpos, new_len);
		else
			ocfs2_free_clusters(handle,
					data_ac->ac_inode,
					data_ac->ac_bh,
					ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
					new_len);
	}

	ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
	inode_unlock(tl_inode);

	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}

	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Find the victim alloc group, i.e. the group where 'vict_blkno' fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
					 u64 vict_blkno,
					 int type, int slot,
					 int *vict_bit,
					 struct buffer_head **ret_bh)
{
	int ret, i, bits_per_unit = 0;
	u64 blkno;
	char namebuf[40];

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *rec;
	struct ocfs2_dinode *ac_dinode;
	struct ocfs2_group_desc *bg;

	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					 strlen(namebuf), &blkno);
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
	cl = &(ac_dinode->id2.i_chain);
	rec = &(cl->cl_recs[0]);

	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
		bits_per_unit = osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits;
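	/*
	 * Example, assuming common sizes: with 4KB clusters
	 * (s_clustersize_bits == 12) and 512B blocks
	 * (s_blocksize_bits == 9), bits_per_unit is 3, i.e. each
	 * bitmap bit spans 2^3 = 8 blocks. For the other (suballoc)
	 * bitmaps a bit addresses a single block, so bits_per_unit
	 * stays 0.
	 */
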
	/*
	 * 'vict_blkno' is outside the valid range.
	 */
	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
				bits_per_unit))) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

		rec = &(cl->cl_recs[i]);
		if (!rec)
			continue;

		bg = NULL;

		do {
			if (!bg)
				blkno = le64_to_cpu(rec->c_blkno);
			else
				blkno = le64_to_cpu(bg->bg_next_group);

			if (gd_bh) {
				brelse(gd_bh);
				gd_bh = NULL;
			}

			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			bg = (struct ocfs2_group_desc *)gd_bh->b_data;

			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
						(le16_to_cpu(bg->bg_bits) <<
						bits_per_unit))) {

				*ret_bh = gd_bh;
				*vict_bit = (vict_blkno - blkno) >>
							bits_per_unit;
				mlog(0, "find the victim group: #%llu, "
				     "total_bits: %u, vict_bit: %u\n",
				     blkno, le16_to_cpu(bg->bg_bits),
				     *vict_bit);
				goto out;
			}

		} while (le64_to_cpu(bg->bg_next_group));
	}

	ret = -EINVAL;
out:
	brelse(ac_bh);

	/*
	 * The caller has to release gd_bh properly.
	 */
	return ret;
}

/*
 * XXX: helper to validate and adjust the moving goal.
 */
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
					       struct ocfs2_move_extents *range)
{
	int ret, goal_bit = 0;

	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *bg;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int c_to_b = 1 << (osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits);
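	/*
	 * c_to_b is the number of blocks per cluster, e.g.
	 * 1 << (12 - 9) = 8 when assuming 4KB clusters and 512B
	 * blocks; it is used below to step 'me_goal' one whole
	 * cluster forward.
	 */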

	/*
	 * Make the goal cluster-aligned.
	 */
	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
						      range->me_goal);
	/*
	 * Validate that the goal sits within the global_bitmap, and
	 * return the victim group descriptor.
	 */
	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret)
		goto out;

	bg = (struct ocfs2_group_desc *)gd_bh->b_data;

	/*
	 * The moving goal is not allowed to start at a group descriptor
	 * block (block #0 of the group), so compromise to the next
	 * cluster.
	 */
	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
		range->me_goal += c_to_b;

	/*
	 * The movement must not cross group boundaries.
	 */
	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
								range->me_len) {
		ret = -EINVAL;
		goto out;
	}
	/*
	 * More exact validations/adjustments will be performed later
	 * during the moving operation for each extent range.
	 */
	mlog(0, "extents get ready to be moved to #%llu block\n",
	     range->me_goal);

out:
	brelse(gd_bh);

	return ret;
}

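/*
 * Scan the group bitmap from '*goal_bit' for a run of 'move_len'
 * contiguous free bits, giving up once the search has wandered more
 * than 'max_hop' bits past the goal. On success '*goal_bit' and
 * '*phys_cpos' describe where the run was found; when the hop limit
 * is exceeded '*phys_cpos' is set to 0, which the caller treats as
 * -ENOSPC.
 */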
static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
				    int *goal_bit, u32 move_len, u32 max_hop,
				    u32 *phys_cpos)
{
	int i, used, last_free_bits = 0, base_bit = *goal_bit;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
						 le64_to_cpu(gd->bg_blkno));

	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {

		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
		if (used) {
			/*
			 * We have already searched up to 'max_hop' bits
			 * past the goal for a free chunk, but still
			 * failed; give up.
			 */
			if ((i - base_bit) > max_hop) {
				*phys_cpos = 0;
				break;
			}

			if (last_free_bits)
				last_free_bits = 0;

			continue;
		} else
			last_free_bits++;

		if (last_free_bits == move_len) {
			i -= move_len;
			*goal_bit = i;
			*phys_cpos = base_cpos + i;
			break;
		}
	}

	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
}

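/*
 * Lock ordering here, outermost first: the global bitmap inode's
 * i_rwsem, then its cluster lock, then the truncate-log inode's
 * i_rwsem, and finally the journal handle; the exit paths below
 * unwind in the reverse order.
 */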
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
			     u32 len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *gb_inode = NULL;
	struct buffer_head *gb_bh = NULL;
	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
						    context->range->me_threshold);
	u64 phys_blkno, new_phys_blkno;

	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
		BUG_ON(!ocfs2_is_refcount_inode(inode));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
						     len, 1,
						     &context->meta_ac,
						     extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Count extra credits for updating the global_bitmap inode and
	 * its group descriptor.
	 */
	credits += OCFS2_INODE_UPDATE_CREDITS + 1;

	/*
	 * ocfs2_move_extent() did not reserve any clusters in
	 * ocfs2_lock_meta_allocator_move_extents(), but we still need
	 * to lock the global_bitmap.
	 */
	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
					       OCFS2_INVALID_SLOT);
	if (!gb_inode) {
		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
		ret = -EIO;
		goto out;
	}

	inode_lock(gb_inode);

	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_gb_mutex;
	}

	inode_lock(tl_inode);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_tl_inode;
	}

	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Probe the victim cluster group to find a region that fits the
	 * wanted move; it will even perform a best-effort attempt by
	 * compromising to a threshold around the goal.
	 */
	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
				new_phys_cpos);
	if (!*new_phys_cpos) {
		ret = -ENOSPC;
		goto out_commit;
	}

	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
				  *new_phys_cpos, ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
					 goal_bit, len);
	if (ret) {
		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
		mlog_errno(ret);
	}

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	inode_unlock(tl_inode);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	inode_unlock(gb_inode);
	brelse(gb_bh);
	iput(gb_inode);

out:
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

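/*
 * A worked example with hypothetical numbers: with a threshold of 16
 * clusters and successive extents of 10, 10 and 10 clusters, the
 * first call leaves 'len_defraged' at 10, the second trims its extent
 * to 6 so the coalesced run hits exactly 16 and resets the counter,
 * and the third starts a new cycle. A single extent at least as large
 * as the threshold, with nothing accumulated yet, is skipped via
 * '*skip'.
 */
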
/*
 * Helper to calculate how much to defrag in one run, according to the
 * threshold.
 */
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
					 u32 threshold, int *skip)
{
	if ((*alloc_size + *len_defraged) < threshold) {
		/*
		 * Keep accumulating until we reach the threshold.
		 */
		*len_defraged += *alloc_size;
	} else if (*len_defraged == 0) {
		/*
		 * XXX: skip a large extent.
		 */
		*skip = 1;
	} else {
		/*
		 * Split this extent so that, coalesced with the former
		 * pieces, it reaches exactly the threshold.
		 *
		 * We're done with one cycle of defragmentation of size
		 * 'threshold'; resetting 'len_defraged' forces a new
		 * cycle.
		 */
		*alloc_size = threshold - *len_defraged;
		*len_defraged = 0;
	}
}

static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				      struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((i_size_read(inode) == 0) || (range->me_len == 0))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * Extent moving happens in units of clusters; for simplicity we
	 * may ignore the two partial clusters that contain 'me_start'
	 * and 'me_start + me_len'.
	 */
	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
	len_to_move = (range->me_start + range->me_len) >>
						osb->s_clustersize_bits;
	if (len_to_move >= move_start)
		len_to_move -= move_start;
	else
		len_to_move = 0;
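	/*
	 * For example, with hypothetical numbers on 4KB clusters:
	 * me_start = 8192 and me_len = 16384 yield move_start = 2 and
	 * len_to_move = (24576 >> 12) - 2 = 4, i.e. clusters 2..5 are
	 * the candidates for moving.
	 */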

	if (do_defrag) {
		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
		if (defrag_thresh <= 1)
			goto done;
	} else
		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
							 range->me_goal);

	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
	     "thresh: %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)range->me_start,
	     (unsigned long long)range->me_len,
	     move_start, len_to_move, defrag_thresh);

	cpos = move_start;
	while (len_to_move) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
					 &flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > len_to_move)
			alloc_size = len_to_move;

		/*
		 * XXX: how to deal with a hole:
		 *
		 * - skip the hole of course
		 * - force a new defragmentation
		 */
		if (!phys_cpos) {
			if (do_defrag)
				len_defraged = 0;

			goto next;
		}

		if (do_defrag) {
			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
						     defrag_thresh, &skip);
			/*
			 * Skip large extents.
			 */
			if (skip) {
				skip = 0;
				goto next;
			}

			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
			     "alloc_size: %u, len_defraged: %u\n",
			     cpos, phys_cpos, alloc_size, len_defraged);

			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
						  &alloc_size, flags);
		} else {
			ret = ocfs2_move_extent(context, cpos, phys_cpos,
						&new_phys_cpos, alloc_size,
						flags);

			new_phys_cpos += alloc_size;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		context->clusters_moved += alloc_size;
next:
		cpos += alloc_size;
		len_to_move -= alloc_size;
	}

done:
	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;

out:
	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
						      context->clusters_moved);
	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
						       context->new_phys_cpos);

	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &context->dealloc);

	return ret;
}

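/*
 * Top-level driver for one move/defrag request. Note the lock
 * ordering, outermost first: i_rwsem, the rw lock (fencing off writes
 * from other nodes), the cluster inode lock, and ip_alloc_sem around
 * the actual range walk.
 */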
static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
	int status;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_dinode *di;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes from other nodes.
	 */
	status = ocfs2_rw_lock(inode, 1);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status) {
		mlog_errno(status);
		goto out_rw_unlock;
	}

	/*
	 * Remember that ip_xattr_sem also needs to be held if necessary.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	status = __ocfs2_move_extents_range(di_bh, context);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (status) {
		mlog_errno(status);
		goto out_inode_unlock;
	}

	/*
	 * We update ctime for these changes.
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_inode_unlock;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	inode_set_ctime_current(inode);
	di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
	di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);
out:
	inode_unlock(inode);

	return status;
}

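/*
 * Entry point for OCFS2_IOC_MOVE_EXT. A minimal userspace sketch,
 * with hypothetical values (see ocfs2_ioctl.h for the definitive
 * struct and flag definitions), might look roughly like:
 *
 *	struct ocfs2_move_extents me = { 0 };
 *
 *	me.me_start = 0;			(byte offset into the file)
 *	me.me_len = file_size;			(bytes to defrag/move)
 *	me.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG;
 *	if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &me) == 0)
 *		printf("moved %llu bytes\n",
 *		       (unsigned long long)me.me_moved_len);
 *
 * Without OCFS2_MOVE_EXT_FL_AUTO_DEFRAG, 'me_goal' selects the target
 * physical block for a plain move instead.
 */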
int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
	int status;

	struct inode *inode = file_inode(filp);
	struct ocfs2_move_extents range;
	struct ocfs2_move_extents_context *context;

	if (!argp)
		return -EINVAL;

	status = mnt_want_write_file(filp);
	if (status)
		return status;

	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
		status = -EPERM;
		goto out_drop;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		status = -EPERM;
		goto out_drop;
	}

	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
	if (!context) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out_drop;
	}

	context->inode = inode;
	context->file = filp;

	if (copy_from_user(&range, argp, sizeof(range))) {
		status = -EFAULT;
		goto out_free;
	}

	if (range.me_start > i_size_read(inode)) {
		status = -EINVAL;
		goto out_free;
	}

	if (range.me_start + range.me_len > i_size_read(inode))
		range.me_len = i_size_read(inode) - range.me_start;

	context->range = &range;

	/*
	 * The default threshold for defragmentation is 1MB, since our
	 * maximum cluster size is 1MB as well. Any thoughts?
	 */
	if (!range.me_threshold)
		range.me_threshold = 1024 * 1024;

	if (range.me_threshold > i_size_read(inode))
		range.me_threshold = i_size_read(inode);

	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
		context->auto_defrag = 1;

		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
			context->partial = 1;
	} else {
		/*
		 * Make a first best-effort attempt to validate and
		 * adjust the goal (a physical block address); it cannot
		 * guarantee that the later operation will always
		 * succeed, since the global_bitmap may change a bit
		 * over time.
		 */
		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
		if (status)
			goto out_copy;
	}

	status = ocfs2_move_extents(context);
	if (status)
		mlog_errno(status);
out_copy:
	/*
	 * The move/defragmentation may end up only partially completed;
	 * that is why we report the finished length and new offset back
	 * to userspace even if a failure happens somewhere.
	 */
	if (copy_to_user(argp, &range, sizeof(range)))
		status = -EFAULT;

out_free:
	kfree(context);
out_drop:
	mnt_drop_write_file(filp);

	return status;
}