// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "accessors.h"
#include "messages.h"
#include "delalloc-space.h"
#include "subpage.h"
#include "defrag.h"
#include "file-item.h"
#include "super.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
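 *
 * The records live in the fs_info->defrag_inodes rbtree, keyed by
 * (root objectid, inode number) and protected by fs_info->defrag_inodes_lock,
 * and are consumed by btrfs_run_defrag_inodes() below.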
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* Inode number */
	u64 ino;
	/*
	 * Transid where the defrag was added, we search for extents newer
	 * than this.
	 */
	u64 transid;

	/* Root objectid */
	u64 root;

	/*
	 * The extent size threshold for autodefrag.
	 *
	 * This value is different for compressed/non-compressed extents, thus
	 * needs to be passed from the higher layer (see inode_should_defrag()).
	 */
	u32 extent_thresh;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/*
 * Insert a record for an inode into the defrag tree. The lock must be held
 * already.
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found the defrag item you pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/*
			 * If we're reinserting an entry for an old defrag run,
			 * make sure to lower the transid of our existing
			 * record.
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			entry->extent_thresh = min(defrag->extent_thresh,
						   entry->extent_thresh);
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode, u32 extent_thresh)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = inode->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;
	defrag->extent_thresh = extent_thresh;

	spin_lock(&fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, the new in-memory
		 * inode doesn't have the IN_DEFRAG flag set. In that case we
		 * may find an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
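 *
 * The (root, ino) pair passed in is the position to resume from: on an exact
 * match that record is returned, otherwise the next record in (root, ino)
 * order is used. The returned record is removed from the rbtree and must be
 * freed by the caller.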
 */
static struct inode_defrag *btrfs_pick_defrag_inode(
			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		cond_resched_lock(&fs_info->defrag_inodes_lock);

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_ioctl_defrag_range_args range;
	int ret = 0;
	u64 cur = 0;

again:
	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
		goto cleanup;
	if (!__need_auto_defrag(fs_info))
		goto cleanup;

	/* Get the inode */
	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
	btrfs_put_root(inode_root);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}

	if (cur >= i_size_read(inode)) {
		iput(inode);
		goto cleanup;
	}

	/* Do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = cur;
	range.extent_thresh = defrag->extent_thresh;

	sb_start_write(fs_info->sb);
	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	iput(inode);

	if (ret < 0)
		goto cleanup;

	cur = max(cur + fs_info->sectorsize, range.start);
	goto again;

cleanup:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * Run through the list of inodes in the FS that need defragging.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info))
			break;

		/* Find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * During unmount, we use the transaction_wait queue to wait for the
	 * defragger to stop.
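	 * The waiter keys off fs_info->defrag_running, which we decremented
	 * just above.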
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/*
 * Check if two block addresses are close, used by defrag.
 */
static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
		return true;
	if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
		return true;
	return false;
}

/*
 * Go through all the leaves pointed to by a node and reallocate them so that
 * disk order is close to key order.
 */
static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *parent,
			      int start_slot, u64 *last_ret,
			      struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	const u32 blocksize = fs_info->nodesize;
	const int end_slot = btrfs_header_nritems(parent) - 1;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	int ret = 0;
	bool progress_passed = false;

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
			   parent->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (btrfs_header_nritems(parent) <= 1)
		return 0;

	for (int i = start_slot; i <= end_slot; i++) {
		struct extent_buffer *cur;
		struct btrfs_disk_key disk_key;
		u64 blocknr;
		u64 other;
		bool close = true;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = true;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		ret = btrfs_force_cow_block(trans, root, cur, parent, i,
					    &cur, search_start,
					    min(16 * blocksize,
						(end_slot - i) * blocksize),
					    BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return ret;
}

/*
 * Defrag all the leaves in a given btree.
 * Read all the leaves and try to get key order to better reflect disk order.
 */
static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	int ret = 0;
	int wret;
	int level;
	int next_key_ret = 0;
	u64 last_ret = 0;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	level = btrfs_header_level(root->node);

	if (level == 0)
		goto out;

	if (root->defrag_progress.objectid == 0) {
		struct extent_buffer *root_node;
		u32 nritems;

		root_node = btrfs_lock_root_node(root);
		nritems = btrfs_header_nritems(root_node);
		root->defrag_max.objectid = 0;
		/* From above we know this is not a leaf */
		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
				      nritems - 1);
		btrfs_tree_unlock(root_node);
		free_extent_buffer(root_node);
		memset(&key, 0, sizeof(key));
	} else {
		memcpy(&key, &root->defrag_progress, sizeof(key));
	}

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);
	/*
	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
	 * leaves from path->nodes[1], so set lowest_level to 1 to avoid later
	 * a deadlock (attempting to write lock an already write locked leaf).
	 */
	path->lowest_level = 1;
	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	if (wret < 0) {
		ret = wret;
		goto out;
	}
	if (!path->nodes[1]) {
		ret = 0;
		goto out;
	}
	/*
	 * The node at level 1 must always be locked when our path has
	 * keep_locks set and lowest_level is 1, regardless of the value of
	 * path->slots[1].
	 */
	ASSERT(path->locks[1] != 0);
	ret = btrfs_realloc_node(trans, root, path->nodes[1], 0, &last_ret,
				 &root->defrag_progress);
	if (ret) {
		WARN_ON(ret == -EAGAIN);
		goto out;
	}
	/*
	 * Now that we reallocated the node we can find the next key. Note that
	 * btrfs_find_next_key() can release our path and do another search
	 * without COWing, this is because even with path->keep_locks = 1,
	 * btrfs_search_slot() / ctree.c:unlock_up() does not keep a lock on a
	 * node when path->slots[node_level - 1] does not point to the last
	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
	 * we search for the next key after reallocating our node.
	 */
	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
					   BTRFS_OLDEST_GENERATION);
	if (next_key_ret == 0) {
		memcpy(&root->defrag_progress, &key, sizeof(key));
		ret = -EAGAIN;
	}
out:
	btrfs_free_path(path);
	if (ret == -EAGAIN) {
		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
		if (root->defrag_max.type > root->defrag_progress.type)
			goto done;
		if (root->defrag_max.offset > root->defrag_progress.offset)
			goto done;
		ret = 0;
	}
done:
	if (ret != -EAGAIN)
		memset(&root->defrag_progress, 0,
		       sizeof(root->defrag_progress));

	return ret;
}

/*
 * Defrag a given btree. Every leaf in the btree is read and defragmented.
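 *
 * The work is split across many small transactions: btrfs_defrag_leaves()
 * returns -EAGAIN after each pass and records its position in
 * root->defrag_progress, so the loop below can resume where it left off
 * instead of holding one transaction open for the whole tree.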
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		struct btrfs_trans_handle *trans;

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);
		cond_resched();

		if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Defrag specific helper to get an extent map.
 *
 * Differences between this and btrfs_get_extent() are:
 *
 * - No extent_map will be added to inode->extent_tree
 *   To reduce memory usage in the long run.
 *
 * - Extra optimization to skip file extents older than @newer_than
 *   By using btrfs_search_forward() we can skip entire file ranges that
 *   have extents created in past transactions, because btrfs_search_forward()
 *   will not visit leaves and nodes with a generation smaller than the given
 *   minimal generation threshold (@newer_than).
 *
 * Return a valid em if we find a file extent matching the requirement.
 * Return NULL if we can not find a file extent matching the requirement.
 *
 * Return ERR_PTR() for error.
 */
static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
					    u64 start, u64 newer_than)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path path = { 0 };
	struct extent_map *em;
	struct btrfs_key key;
	u64 ino = btrfs_ino(inode);
	int ret;

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto err;
	}

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	if (newer_than) {
		ret = btrfs_search_forward(root, &key, &path, newer_than);
		if (ret < 0)
			goto err;
		/* Can't find anything newer */
		if (ret > 0)
			goto not_found;
	} else {
		ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
		if (ret < 0)
			goto err;
	}
	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
		/*
		 * If btrfs_search_slot() makes the path point beyond nritems,
		 * we should not have an empty leaf, as this inode must at
		 * least have its INODE_ITEM.
		 */
		ASSERT(btrfs_header_nritems(path.nodes[0]));
		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
	}
	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
	/* Perfect match, no need to go one slot back */
	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
	    key.offset == start)
		goto iterate;

	/* We didn't find a perfect match, need to go one slot back */
	if (path.slots[0] > 0) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
			path.slots[0]--;
	}

iterate:
	/* Iterate through the path to find a file extent covering @start */
	while (true) {
		u64 extent_end;

		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
			goto next;

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

		/*
		 * We may go one slot back to an INODE_REF/XATTR item, then
		 * need to go forward until we reach an EXTENT_DATA.
		 * But we should still have the correct ino as key.objectid.
		 */
		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;

		/* It's beyond our target range, definitely no extent found */
		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
			goto not_found;

		/*
		 *	|	|<- File extent ->|
		 *	\- start
		 *
		 * This means there is a hole between start and key.offset.
		 */
		if (key.offset > start) {
			em->start = start;
			em->orig_start = start;
			em->block_start = EXTENT_MAP_HOLE;
			em->len = key.offset - start;
			break;
		}

		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(&path);

		/*
		 *	|<- file extent ->|	|
		 *				\- start
		 *
		 * We haven't reached start, search the next slot.
		 */
		if (extent_end <= start)
			goto next;

		/* Now this extent covers @start, convert it to an em */
		btrfs_extent_item_to_extent_map(inode, &path, fi, em);
		break;
next:
		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto err;
		if (ret > 0)
			goto not_found;
	}
	btrfs_release_path(&path);
	return em;

not_found:
	btrfs_release_path(&path);
	free_extent_map(em);
	return NULL;

err:
	btrfs_release_path(&path);
	free_extent_map(em);
	return ERR_PTR(ret);
}

static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
					       u64 newer_than, bool locked)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;

	/*
	 * Hopefully we have this extent in the tree already, try without the
	 * full extent lock.
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, sectorsize);
	read_unlock(&em_tree->lock);

	/*
	 * We can get a merged extent, in that case we need to re-search the
	 * tree to get the original em for defrag.
	 *
	 * If @newer_than is 0 or em::generation < newer_than, we can trust
	 * this em, as either we don't care about the generation, or the
	 * merged extent map will be rejected anyway.
	 */
	if (em && (em->flags & EXTENT_FLAG_MERGED) &&
	    newer_than && em->generation >= newer_than) {
		free_extent_map(em);
		em = NULL;
	}

	if (!em) {
		struct extent_state *cached = NULL;
		u64 end = start + sectorsize - 1;

		/* Get the big lock and read metadata off disk. */
		if (!locked)
			lock_extent(io_tree, start, end, &cached);
		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
		if (!locked)
			unlock_extent(io_tree, start, end, &cached);

		if (IS_ERR(em))
			return NULL;
	}

	return em;
}

static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
				   const struct extent_map *em)
{
	if (extent_map_is_compressed(em))
		return BTRFS_MAX_COMPRESSED;
	return fs_info->max_extent_size;
}

static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
				     u32 extent_thresh, u64 newer_than,
				     bool locked)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_map *next;
	bool ret = false;

	/* This is the last extent */
	if (em->start + em->len >= i_size_read(inode))
		return false;

	/*
	 * Here we need to pass @newer_than when checking the next extent, or
	 * we will hit a case where we mark the current extent for defrag, but
	 * the next one will not be a target.
	 * This will just cause extra IO without really reducing the fragments.
	 */
	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
	/* No more em or hole */
	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
		goto out;
	if (next->flags & EXTENT_FLAG_PREALLOC)
		goto out;
	/*
	 * If the next extent is at its max capacity, defragging the current
	 * extent makes no sense, as the total number of extents won't change.
	 */
	if (next->len >= get_extent_max_capacity(fs_info, em))
		goto out;
	/* Skip older extent */
	if (next->generation < newer_than)
		goto out;
	/* Also check extent size */
	if (next->len >= extent_thresh)
		goto out;

	ret = true;
out:
	free_extent_map(next);
	return ret;
}

/*
 * Prepare one page to be defragged.
 *
 * This will ensure:
 *
 * - Returned page is locked and has been set up properly.
 * - No ordered extent exists in the page.
 * - The page is uptodate.
 *
 * NOTE: Caller should also wait for page writeback after the cluster is
 * prepared, here we don't do writeback wait for each page.
 */
static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode,
					      pgoff_t index)
{
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	u64 page_start = (u64)index << PAGE_SHIFT;
	u64 page_end = page_start + PAGE_SIZE - 1;
	struct extent_state *cached_state = NULL;
	struct folio *folio;
	int ret;

again:
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
	if (IS_ERR(folio))
		return folio;

	/*
	 * Since we can defragment files opened read-only, we can encounter
	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
	 * can't do I/O using huge pages yet, so return an error for now.
	 * Filesystem transparent huge pages are typically only used for
	 * executables that explicitly enable them, so this isn't very
	 * restrictive.
	 */
	if (folio_test_large(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-ETXTBSY);
	}

	ret = set_folio_extent_mapped(folio);
	if (ret < 0) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(ret);
	}

	/* Wait for any existing ordered extent in the range */
	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		if (!ordered)
			break;

		folio_unlock(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		folio_lock(folio);
		/*
		 * We unlocked the folio above, so we need to check if it was
		 * released or not.
		 */
		if (folio->mapping != mapping || !folio->private) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
	}

	/*
	 * Now the page range has no ordered extent any more. Read the page to
	 * make it uptodate.
	 */
	if (!folio_test_uptodate(folio)) {
		btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (folio->mapping != mapping || !folio->private) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		if (!folio_test_uptodate(folio)) {
			folio_unlock(folio);
			folio_put(folio);
			return ERR_PTR(-EIO);
		}
	}
	return folio;
}

struct defrag_target_range {
	struct list_head list;
	u64 start;
	u64 len;
};

/*
 * Collect all valid target extents.
 *
 * @start:	   file offset to lookup
 * @len:	   length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *		   will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *		   if true, @extent_thresh will be ignored and all regular
 *		   file extents meeting @newer_than will be targets.
 * @locked:	   whether the extent lock is already held on the range
 * @target_list:   list of target file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
				  u64 start, u64 len, u32 extent_thresh,
				  u64 newer_than, bool do_compress,
				  bool locked, struct list_head *target_list,
				  u64 *last_scanned_ret)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	bool last_is_target = false;
	u64 cur = start;
	int ret = 0;

	while (cur < start + len) {
		struct extent_map *em;
		struct defrag_target_range *new;
		bool next_mergeable = true;
		u64 range_len;

		last_is_target = false;
		em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);
		if (!em)
			break;

		/*
		 * If the file extent is an inlined one, we may still want to
		 * defrag it (fallthrough) if it will result in a regular
		 * extent. This is for users who want to convert inline
		 * extents to regular ones through the max_inline= mount
		 * option.
		 */
		if (em->block_start == EXTENT_MAP_INLINE &&
		    em->len <= inode->root->fs_info->max_inline)
			goto next;

		/* Skip holes and preallocated extents. */
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (em->flags & EXTENT_FLAG_PREALLOC))
			goto next;

		/* Skip older extent */
		if (em->generation < newer_than)
			goto next;

		/* This em is under writeback, no need to defrag */
		if (em->generation == (u64)-1)
			goto next;

		/*
		 * Our start offset might be in the middle of an existing
		 * extent map, so take that into account.
		 */
		range_len = em->len - (cur - em->start);
		/*
		 * If this range of the extent map is already flagged for
		 * delalloc, skip it, because:
		 *
		 * 1) We could deadlock later, when trying to reserve space for
		 *    delalloc, because in case we can't immediately reserve
		 *    space the flusher can start delalloc and wait for the
		 *    respective ordered extents to complete. The deadlock
		 *    would happen because we do the space reservation while
		 *    holding the range locked, and starting writeback, or
		 *    finishing an ordered extent, requires locking the range;
		 *
		 * 2) If there's delalloc there, it means there are dirty pages
		 *    for which writeback has not started yet (we clean the
		 *    delalloc flag when starting writeback and after creating
		 *    an ordered extent). If we mark pages in an adjacent range
		 *    for defrag, then we will have a larger contiguous range
		 *    for delalloc, very likely resulting in a larger extent
		 *    after writeback is triggered (except in a case of free
		 *    space fragmentation).
		 */
		if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
					  EXTENT_DELALLOC))
			goto next;

		/*
		 * For the do_compress case, we want to compress all valid file
		 * extents, thus no @extent_thresh or mergeable check.
		 */
		if (do_compress)
			goto add;

		/* Skip too large extent */
		if (em->len >= extent_thresh)
			goto next;

		/*
		 * Skip extents already at their max capacity, this is mostly
		 * for compressed extents, whose max capacity is only 128K.
		 */
		if (em->len >= get_extent_max_capacity(fs_info, em))
			goto next;

		/*
		 * Normally there are no more extents after an inline one, thus
		 * @next_mergeable will normally be false and not defragged.
		 * So if an inline extent passed all the above checks, just add
		 * it for defrag so it can be converted to a regular extent.
		 */
		if (em->block_start == EXTENT_MAP_INLINE)
			goto add;

		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
						extent_thresh, newer_than, locked);
		if (!next_mergeable) {
			struct defrag_target_range *last;

			/* Empty target list, no way to merge with last entry */
			if (list_empty(target_list))
				goto next;
			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			/* Not mergeable with last entry */
			if (last->start + last->len != cur)
				goto next;

			/* Mergeable, fall through to add it to @target_list. */
		}

add:
		last_is_target = true;
		range_len = min(extent_map_end(em), start + len) - cur;
		/*
		 * This one is a good target, check if it can be merged into
		 * the last range of the target list.
		 */
		if (!list_empty(target_list)) {
			struct defrag_target_range *last;

			last = list_entry(target_list->prev,
					  struct defrag_target_range, list);
			ASSERT(last->start + last->len <= cur);
			if (last->start + last->len == cur) {
				/* Mergeable, enlarge the last entry */
				last->len += range_len;
				goto next;
			}
			/* Fall through to allocate a new entry */
		}

		/* Allocate new defrag_target_range */
		new = kmalloc(sizeof(*new), GFP_NOFS);
		if (!new) {
			free_extent_map(em);
			ret = -ENOMEM;
			break;
		}
		new->start = cur;
		new->len = range_len;
		list_add_tail(&new->list, target_list);

next:
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	if (ret < 0) {
		struct defrag_target_range *entry;
		struct defrag_target_range *tmp;

		list_for_each_entry_safe(entry, tmp, target_list, list) {
			list_del_init(&entry->list);
			kfree(entry);
		}
	}
	if (!ret && last_scanned_ret) {
		/*
		 * If the last extent is not a target, the caller can skip to
		 * the end of that extent.
		 * Otherwise, we can only go to the end of the specified range.
		 */
		if (!last_is_target)
			*last_scanned_ret = max(cur, *last_scanned_ret);
		else
			*last_scanned_ret = max(start + len, *last_scanned_ret);
	}
	return ret;
}

#define CLUSTER_SIZE	(SZ_256K)
static_assert(PAGE_ALIGNED(CLUSTER_SIZE));

/*
 * Defrag one contiguous target range.
 *
 * @inode:	target inode
 * @target:	target range to defrag
 * @pages:	locked pages covering the defrag range
 * @nr_pages:	number of locked pages
 *
 * Caller should ensure:
 *
 * - Pages are prepared
 *   Pages should be locked, no ordered extent in the pages range,
 *   no writeback.
 *
 * - Extent bits are locked
 */
static int defrag_one_locked_target(struct btrfs_inode *inode,
				    struct defrag_target_range *target,
				    struct folio **folios, int nr_pages,
				    struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_changeset *data_reserved = NULL;
	const u64 start = target->start;
	const u64 len = target->len;
	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
	unsigned long start_index = start >> PAGE_SHIFT;
	unsigned long first_index = folios[0]->index;
	int ret = 0;
	int i;

	ASSERT(last_index - first_index + 1 <= nr_pages);

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
	if (ret < 0)
		return ret;
	clear_extent_bit(&inode->io_tree, start, start + len - 1,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached_state);
	set_extent_bit(&inode->io_tree, start, start + len - 1,
		       EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);

	/* Update the page status */
	for (i = start_index - first_index; i <= last_index - first_index; i++) {
		folio_clear_checked(folios[i]);
		btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
	}
	btrfs_delalloc_release_extents(inode, len);
	extent_changeset_free(data_reserved);

	return ret;
}

static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
			    u32 extent_thresh, u64 newer_than, bool do_compress,
			    u64 *last_scanned_ret)
{
	struct extent_state *cached_state = NULL;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	struct folio **folios;
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
	u64 start_index = start >> PAGE_SHIFT;
	unsigned int nr_pages = last_index - start_index + 1;
	int ret = 0;
	int i;

	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

	folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
	if (!folios)
		return -ENOMEM;

	/* Prepare all pages */
	for (i = 0; i < nr_pages; i++) {
		folios[i] = defrag_prepare_one_folio(inode, start_index + i);
		if (IS_ERR(folios[i])) {
			ret = PTR_ERR(folios[i]);
			nr_pages = i;
			goto free_folios;
		}
	}
	for (i = 0; i < nr_pages; i++)
		folio_wait_writeback(folios[i]);

	/* Lock the pages range */
	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		    &cached_state);
	/*
	 * Now we have a consistent view about the extent map, re-check
	 * which range really needs to be defragged.
	 *
	 * And this time we have extent locked already, pass @locked = true
	 * so that we won't relock the extent range and cause deadlock.
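	 *
	 * The earlier, unlocked collection pass in defrag_one_cluster() was
	 * only advisory: the range may have changed (e.g. been written to or
	 * hole punched) before we got the folio and extent locks, so only the
	 * targets found by this locked pass are actually marked for defrag.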
	 */
	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, true,
				     &target_list, last_scanned_ret);
	if (ret < 0)
		goto unlock_extent;

	list_for_each_entry(entry, &target_list, list) {
		ret = defrag_one_locked_target(inode, entry, folios, nr_pages,
					       &cached_state);
		if (ret < 0)
			break;
	}

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
unlock_extent:
	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
		      &cached_state);
free_folios:
	for (i = 0; i < nr_pages; i++) {
		folio_unlock(folios[i]);
		folio_put(folios[i]);
	}
	kfree(folios);
	return ret;
}

static int defrag_one_cluster(struct btrfs_inode *inode,
			      struct file_ra_state *ra,
			      u64 start, u32 len, u32 extent_thresh,
			      u64 newer_than, bool do_compress,
			      unsigned long *sectors_defragged,
			      unsigned long max_sectors,
			      u64 *last_scanned_ret)
{
	const u32 sectorsize = inode->root->fs_info->sectorsize;
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, false,
				     &target_list, NULL);
	if (ret < 0)
		goto out;

	list_for_each_entry(entry, &target_list, list) {
		u32 range_len = entry->len;

		/* Reached or beyond the limit */
		if (max_sectors && *sectors_defragged >= max_sectors) {
			ret = 1;
			break;
		}

		if (max_sectors)
			range_len = min_t(u32, range_len,
				(max_sectors - *sectors_defragged) * sectorsize);

		/*
		 * If defrag_one_range() has updated last_scanned_ret,
		 * our range may already be invalid (e.g. hole punched).
		 * Skip if our range is before last_scanned_ret, as there is
		 * no need to defrag the range anymore.
		 */
		if (entry->start + range_len <= *last_scanned_ret)
			continue;

		if (ra)
			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
				ra, NULL, entry->start >> PAGE_SHIFT,
				((entry->start + range_len - 1) >> PAGE_SHIFT) -
				(entry->start >> PAGE_SHIFT) + 1);

		/*
		 * Here we may not defrag any range if holes are punched before
		 * we locked the pages.
		 * But that's fine, it only affects the @sectors_defragged
		 * accounting.
		 */
		ret = defrag_one_range(inode, entry->start, range_len,
				       extent_thresh, newer_than, do_compress,
				       last_scanned_ret);
		if (ret < 0)
			break;
		*sectors_defragged += range_len >>
				      inode->root->fs_info->sectorsize_bits;
	}
out:
	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (ret >= 0)
		*last_scanned_ret = max(*last_scanned_ret, start + len);
	return ret;
}

/*
 * Entry point to file defragmentation.
 *
 * @inode:	   inode to be defragged
 * @ra:		   readahead state (can be NULL)
 * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
 *		   will be defragged.
 *
 * Return <0 for error.
 * Return >=0 for the number of sectors defragged, and range->start will be
 * updated to indicate the file offset where the next defrag should start
 * (mostly for autodefrag, which sets @max_to_defrag, so we may exit early
 * without defragging the whole range).
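 *
 * As a purely illustrative sketch (the values below are not prescriptive), a
 * manual-defrag style caller could set the arguments up like this:
 *
 *	struct btrfs_ioctl_defrag_range_args range = { 0 };
 *
 *	range.start = 0;
 *	range.len = (u64)-1;		// defrag the whole file
 *	range.extent_thresh = SZ_256K;	// ignore extents >= 256K
 *	range.flags = BTRFS_DEFRAG_RANGE_START_IO;
 *	ret = btrfs_defrag_file(inode, NULL, &range, 0, 0);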
 */
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long sectors_defragged = 0;
	u64 isize = i_size_read(inode);
	u64 cur;
	u64 last_byte;
	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
	bool ra_allocated = false;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int ret = 0;
	u32 extent_thresh = range->extent_thresh;
	pgoff_t start_index;

	if (isize == 0)
		return 0;

	if (range->start >= isize)
		return -EINVAL;

	if (do_compress) {
		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
			return -EINVAL;
		if (range->compress_type)
			compress_type = range->compress_type;
	}

	if (extent_thresh == 0)
		extent_thresh = SZ_256K;

	if (range->start + range->len > range->start) {
		/* Got a specific range */
		last_byte = min(isize, range->start + range->len);
	} else {
		/* Defrag until file end */
		last_byte = isize;
	}

	/* Align the range */
	cur = round_down(range->start, fs_info->sectorsize);
	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;

	/*
	 * If we were not given a ra, allocate a readahead context. As
	 * readahead is just an optimization, defrag will work without it so
	 * we don't error out.
	 */
	if (!ra) {
		ra_allocated = true;
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	}

	/*
	 * Make writeback start from the beginning of the range, so that the
	 * defrag range can be written sequentially.
	 */
	start_index = cur >> PAGE_SHIFT;
	if (start_index < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = start_index;

	while (cur < last_byte) {
		const unsigned long prev_sectors_defragged = sectors_defragged;
		u64 last_scanned = cur;
		u64 cluster_end;

		if (btrfs_defrag_cancelled(fs_info)) {
			ret = -EAGAIN;
			break;
		}

		/* We want the cluster end at a page boundary when possible */
		cluster_end = (((cur >> PAGE_SHIFT) +
			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
		cluster_end = min(cluster_end, last_byte);

		btrfs_inode_lock(BTRFS_I(inode), 0);
		if (IS_SWAPFILE(inode)) {
			ret = -ETXTBSY;
			btrfs_inode_unlock(BTRFS_I(inode), 0);
			break;
		}
		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
			btrfs_inode_unlock(BTRFS_I(inode), 0);
			break;
		}
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				cluster_end + 1 - cur, extent_thresh,
				newer_than, do_compress, &sectors_defragged,
				max_to_defrag, &last_scanned);

		if (sectors_defragged > prev_sectors_defragged)
			balance_dirty_pages_ratelimited(inode->i_mapping);

		btrfs_inode_unlock(BTRFS_I(inode), 0);
		if (ret < 0)
			break;
		cur = max(cluster_end + 1, last_scanned);
		if (ret > 0) {
			ret = 0;
			break;
		}
		cond_resched();
	}

	if (ra_allocated)
		kfree(ra);
	/*
	 * Update range.start for autodefrag, this will indicate where to start
	 * in the next run.
	 */
	range->start = cur;
	if (sectors_defragged) {
		/*
		 * We have defragged some sectors, for the compression case
		 * they need to be written back immediately.
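		 *
		 * Note: when async compression is involved (the inode has
		 * BTRFS_INODE_HAS_ASYNC_EXTENT set), a single filemap_flush()
		 * may return before all of those pages are actually submitted,
		 * which is why the flush below is issued a second time in that
		 * case.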
		 */
		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
			filemap_flush(inode->i_mapping);
			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
				     &BTRFS_I(inode)->runtime_flags))
				filemap_flush(inode->i_mapping);
		}
		if (range->compress_type == BTRFS_COMPRESS_LZO)
			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
		ret = sectors_defragged;
	}
	if (do_compress) {
		btrfs_inode_lock(BTRFS_I(inode), 0);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		btrfs_inode_unlock(BTRFS_I(inode), 0);
	}
	return ret;
}

void __cold btrfs_auto_defrag_exit(void)
{
	kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int __init btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0, 0, NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}