// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

        tic = xlog_ticket_alloc(log, 0, 1, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        tic->t_iclog_hdrs = 0;
        return tic;
}

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
        struct xlog     *log = cil->xc_log;

        atomic_set(&cil->xc_iclog_hdrs,
                   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
                        (log->l_iclog_size - log->l_iclog_hsize)));
}
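
/*
 * Worked example of the calculation above, using illustrative numbers
 * rather than any particular log geometry: with a 32kB iclog
 * (l_iclog_size = 32768), a 512 byte iclog header (l_iclog_hsize = 512)
 * and a blocking limit of, say, 8MB, xc_iclog_hdrs is set to
 * 8388608 / (32768 - 512) ~= 260 - the number of iclog headers that
 * commits may need to steal reservation for before the push completes.
 */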

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
        struct xfs_cil          *cil,
        struct xfs_log_item     *lip)
{
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                return false;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item *lip)
{
        return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
        struct xfs_cil_ctx      *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
        INIT_LIST_HEAD(&ctx->log_items);
        INIT_LIST_HEAD(&ctx->lv_chain);
        INIT_WORK(&ctx->push_work, xlog_cil_push_work);
        return ctx;
}

/*
 * Aggregate the CIL per-cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing
 * else will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;

        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

                ctx->ticket->t_curr_res += cilpcp->space_reserved;
                cilpcp->space_reserved = 0;

                if (!list_empty(&cilpcp->busy_extents)) {
                        list_splice_init(&cilpcp->busy_extents,
                                        &ctx->busy_extents.extent_list);
                }
                if (!list_empty(&cilpcp->log_items))
                        list_splice_init(&cilpcp->log_items, &ctx->log_items);

                /*
                 * We're in the middle of switching cil contexts.  Reset the
                 * counter we use to detect when the current context is
                 * nearing full.
                 */
                cilpcp->space_used = 0;
        }
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;
        int                     count = 0;

        /* Trigger atomic updates then aggregate only for the first caller */
        if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
                return;

        /*
         * We can race with other cpus setting cil_pcpmask.  However, we've
         * atomically cleared PCP_SPACE which forces other threads to add to
         * the global space used count.  cil_pcpmask is a superset of cilpcp
         * structures that could have a nonzero space_used.
         */
        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                int     old, prev;

                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
                do {
                        old = cilpcp->space_used;
                        prev = cmpxchg(&cilpcp->space_used, old, 0);
                } while (old != prev);
                count += old;
        }
        atomic_add(count, &ctx->space_used);
}

static void
xlog_cil_ctx_switch(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        xlog_cil_set_iclog_hdr_count(cil);
        set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
        set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
        ctx->sequence = ++cil->xc_current_sequence;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
        xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                                        niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}
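
/*
 * For illustration, assuming a hypothetical 56 byte struct xfs_log_vec
 * and 24 byte struct xfs_log_iovec (actual sizes vary by architecture
 * and kernel version), xlog_cil_iovec_space(2) would return
 * round_up(56 + 2 * 24, 8) = 104 bytes of header space placed in front
 * of the 64-bit aligned data region of the buffer.
 */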

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *      a) it does not exist; or
 *      b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock
 * situation under low memory conditions when we have lots of dirty metadata
 * pinned in the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size
 * of the incoming modification. Then during the formatting of the item we can
 * swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it
 * at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to
                 * write them. We need a logvec to track the object, but we
                 * do not need an iovec or buffer to be allocated for copying
                 * data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned. We'll need to account
                 * for that slack space here.
                 *
                 * We also add the xlog_op_header to each region when
                 * formatting, but that's not accounted to the size of the
                 * item at this point. Hence we'll need an additional number
                 * of bytes for each vector to hold an opheader.
                 *
                 * Then round nbytes up to 64-bit alignment so that the
                 * initial buffer alignment is easy to calculate and verify.
                 */
                nbytes += niovecs *
                        (sizeof(uint64_t) + sizeof(struct xlog_op_header));
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {
                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kvzalloc() for the
                         * same reason - we don't need to zero the data area
                         * in the buffer, only the log vector header and the
                         * iovec storage.
                         */
                        kvfree(lip->li_lv_shadow);
                        lv = xlog_kvmalloc(buf_size);

                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        INIT_LIST_HEAD(&lv->lv_list);
                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}
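
/*
 * Sizing sketch for the shadow buffer above, with made-up numbers: a
 * modification reporting niovecs = 2 and nbytes = 100 via ->iop_size
 * becomes 100 + 2 * (8 + 12) = 140 bytes (assuming the 12 byte
 * xlog_op_header), rounded up to 144, plus xlog_cil_iovec_space(2) for
 * the log vector and iovec headers. The shadow is only reallocated when
 * this total grows beyond lv_size, so a steady stream of same-sized
 * overwrites allocates no new memory at all.
 */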

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference
 * in log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                *diff_len += lv->lv_bytes;

        /*
         * If there is no old LV, this is the first time we've seen the item
         * in this CIL context and so we need to pin it. If we are replacing
         * the old_lv, then remove the space it accounts for and make it the
         * shadow buffer for later freeing. In both cases we are now switching
         * to the shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will
 * only use that if we can't reuse the existing lv. If we can't reuse the
 * existing lv, then simply swap it out for the shadow lv. We don't free it -
 * that is done lazily either by the next modification or the freeing of the
 * log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item.  */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len);
        }
}
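
/*
 * Buffer lifecycle for a repeatedly relogged item, as implemented by the
 * two functions above (an illustration, not a new state machine): commit
 * N formats into li_lv; commit N+1 refreshes li_lv_shadow before taking
 * xc_ctx_lock; if li_lv is still large enough it is overwritten in
 * place, otherwise the shadow becomes li_lv and the old buffer is parked
 * in li_lv_shadow for a later overwrite or reallocation.
 */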

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
        struct xlog     *log,
        int32_t         space_used)
{
        if (waitqueue_active(&log->l_cilp->xc_push_wait))
                return true;
        if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
                return true;
        return false;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        uint32_t                released_space)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;
        int                     space_used;
        int                     order;
        unsigned int            cpu_nr;
        struct xlog_cil_pcp     *cilpcp;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len);

        /*
         * Subtract the space released by intent cancelation from the space we
         * consumed so that we remove it from the CIL space and add it back to
         * the current transaction reservation context.
         */
        len -= released_space;

        /*
         * Grab the per-cpu pointer for the CIL before we start any
         * accounting. That ensures that we are running with pre-emption
         * disabled and so we can't be scheduled away between split
         * sample/update operations that are done without outside locking to
         * serialise them.
         */
        cpu_nr = get_cpu();
        cilpcp = this_cpu_ptr(cil->xc_pcp);

        /* Tell the future push that there was work added by this CPU. */
        if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
                cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

        /*
         * We need to take the CIL checkpoint unit reservation on the first
         * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
         * unnecessarily do an atomic op in the fast path here. We can clear
         * the XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and
         * that needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
            test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                ctx_res = ctx->ticket->t_unit_res;

        /*
         * Check if we need to steal iclog headers. atomic_read() is not a
         * locked atomic operation, so we can check the value before we do any
         * real atomic ops in the fast path. If we've already taken the CIL
         * unit reservation from this commit, we've already got one iclog
         * header space reserved so we have to account for that otherwise we
         * risk overrunning the reservation on this ticket.
         *
         * If the CIL is already at the hard limit, we might need more header
         * space than originally reserved. So steal more header space from
         * every commit that occurs once we are over the hard limit to ensure
         * the CIL push won't run out of reservation space.
         *
         * This can steal more than we need, but that's OK.
         *
         * The cil->xc_ctx_lock provides the serialisation necessary for
         * safely calling xlog_cil_over_hard_limit() in this context.
         */
        space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
        if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
            xlog_cil_over_hard_limit(log, space_used)) {
                split_res = log->l_iclog_hsize +
                                        sizeof(struct xlog_op_header);
                if (ctx_res)
                        ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
                else
                        ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
                atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
        }
        cilpcp->space_reserved += ctx_res;

        /*
         * Accurately account when over the soft limit, otherwise fold the
         * percpu count into the global count if over the per-cpu threshold.
         */
        if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
                atomic_add(len, &ctx->space_used);
        } else if (cilpcp->space_used + len >
                        (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
                space_used = atomic_add_return(cilpcp->space_used + len,
                                                &ctx->space_used);
                cilpcp->space_used = 0;

                /*
                 * If we just transitioned over the soft limit, we need to
                 * transition to the global atomic counter.
                 */
                if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
                        xlog_cil_insert_pcp_aggregate(cil, ctx);
        } else {
                cilpcp->space_used += len;
        }
        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

        /*
         * Now update the order of everything modified in the transaction
         * and insert items into the CIL if they aren't already there.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        order = atomic_inc_return(&ctx->order_id);
        list_for_each_entry(lip, &tp->t_items, li_trans) {
                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                lip->li_order_id = order;
                if (!list_empty(&lip->li_cil))
                        continue;
                list_add_tail(&lip->li_cil, &cilpcp->log_items);
        }
        put_cpu();

        /*
         * If we've overrun the reservation, dump the tx details before we
         * move the log items. Shutdown is imminent...
         */
        tp->t_ticket->t_curr_res -= ctx_res + len;
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
}
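
/*
 * Space accounting example for the function above, with invented
 * numbers: the first commit into an empty CIL formatting 2000 bytes
 * (len) takes ctx_res = t_unit_res for the checkpoint ticket and pays
 * for both out of its own reservation via
 *      tp->t_ticket->t_curr_res -= ctx_res + len;
 * later commits into the same context only pay for their own len,
 * unless iclog header space has to be stolen as well.
 */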

static void
xlog_cil_free_logvec(
        struct list_head        *lv_chain)
{
        struct xfs_log_vec      *lv;

        while (!list_empty(lv_chain)) {
                lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
                list_del_init(&lv->lv_list);
                kvfree(lv);
        }
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = xlog_is_shutdown(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly
         * do not wait on log force completion because they may be holding
         * locks required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_start_wait);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
        xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
                              xfs_has_discard(mp) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(&ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents.extent_list)) {
                ctx->busy_extents.mount = mp;
                ctx->busy_extents.owner = ctx;
                xfs_discard_extents(mp, &ctx->busy_extents);
                return;
        }

        kfree(ctx);
}

void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}

/*
 * Record the LSN of the iclog we were just granted space to start writing
 * into. If the context doesn't have a start_lsn recorded, then this iclog
 * will contain the start record for the checkpoint. Otherwise this write
 * contains the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
        struct xfs_cil_ctx      *ctx,
        struct xlog_in_core     *iclog)
{
        struct xfs_cil          *cil = ctx->cil;
        xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);

        ASSERT(!ctx->commit_lsn);
        if (!ctx->start_lsn) {
                spin_lock(&cil->xc_push_lock);
                /*
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write,
                 * not the commit lsn. If we use the commit record lsn then we
                 * can move the grant write head beyond the tail LSN and
                 * overwrite it.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);

                /*
                 * Make sure the metadata we are about to overwrite in the log
                 * has been flushed to stable storage before this iclog is
                 * issued.
                 */
                spin_lock(&cil->xc_log->l_icloglock);
                iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
                spin_unlock(&cil->xc_log->l_icloglock);
                return;
        }

        /*
         * Take a reference to the iclog for the context so that we still hold
         * it when xlog_write is done and has released it. This means the
         * context controls when the iclog is released for IO.
         */
        atomic_inc(&iclog->ic_refcnt);

        /*
         * xlog_state_get_iclog_space() guarantees there is enough space in
         * the iclog for an entire commit record, so we can attach the context
         * callbacks now. This needs to be done before we make the commit_lsn
         * visible to waiters so that checkpoints with commit records in the
         * same iclog order their IO completion callbacks in the same order
         * that the commit records appear in the iclog.
         */
        spin_lock(&cil->xc_log->l_icloglock);
        list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
        spin_unlock(&cil->xc_log->l_icloglock);

        /*
         * Now we can record the commit LSN and wake anyone waiting for this
         * sequence to have the ordered commit record assigned to a physical
         * location in the log.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_iclog = iclog;
        ctx->commit_lsn = lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);
}
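
/*
 * Concrete ordering example for the above (hypothetical LSNs): if
 * checkpoint sequence 5 writes its start record at LSN 0x100 and its
 * commit record at LSN 0x180, the log items are tracked in the AIL
 * under start_lsn 0x100, while sequence 6 must see commit_lsn 0x180
 * recorded here before it may write its own commit record.
 */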

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
        _START_RECORD,
        _COMMIT_RECORD,
};

static int
xlog_cil_order_write(
        struct xfs_cil          *cil,
        xfs_csn_t               sequence,
        enum _record_type       record)
{
        struct xfs_cil_ctx      *ctx;

restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by
                 * the shutdown, but then went back to sleep once already in
                 * the shutdown state.
                 */
                if (xlog_is_shutdown(cil->xc_log)) {
                        spin_unlock(&cil->xc_push_lock);
                        return -EIO;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (ctx->sequence >= sequence)
                        continue;

                /* Wait until the LSN for the record has been recorded. */
                switch (record) {
                case _START_RECORD:
                        if (!ctx->start_lsn) {
                                xlog_wait(&cil->xc_start_wait,
                                                &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                case _COMMIT_RECORD:
                        if (!ctx->commit_lsn) {
                                xlog_wait(&cil->xc_commit_wait,
                                                &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                }
        }
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs
 * when replaying checkpoints.
 */
static int
xlog_cil_write_chain(
        struct xfs_cil_ctx      *ctx,
        uint32_t                chain_len)
{
        struct xlog             *log = ctx->cil->xc_log;
        int                     error;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
        if (error)
                return error;
        return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending
 * CIL sequence order so that log recovery will always replay the checkpoints
 * in the correct order.
 */
static int
xlog_cil_write_commit_record(
        struct xfs_cil_ctx      *ctx)
{
        struct xlog             *log = ctx->cil->xc_log;
        struct xlog_op_header   ophdr = {
                .oh_clientid = XFS_TRANSACTION,
                .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
                .oh_flags = XLOG_COMMIT_TRANS,
        };
        struct xfs_log_iovec    reg = {
                .i_addr = &ophdr,
                .i_len = sizeof(struct xlog_op_header),
                .i_type = XLOG_REG_TYPE_COMMIT,
        };
        struct xfs_log_vec      vec = {
                .lv_niovecs = 1,
                .lv_iovecp = &reg,
        };
        int                     error;
        LIST_HEAD(lv_chain);
        list_add(&vec.lv_list, &lv_chain);

        if (xlog_is_shutdown(log))
                return -EIO;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
        if (error)
                return error;

        /* account for space used by record data */
        ctx->ticket->t_curr_res -= reg.i_len;
        error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
        if (error)
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        return error;
}

struct xlog_cil_trans_hdr {
        struct xlog_op_header   oph[2];
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr[2];
};
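
/*
 * Resulting on-disk region layout for a checkpoint, as assembled below
 * (sketch): [XLOG_START_TRANS opheader][opheader + xfs_trans_header]
 * followed by the sorted item regions, closed off by the standalone
 * XLOG_COMMIT_TRANS record written by xlog_cil_write_commit_record().
 */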

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
        struct xfs_cil_ctx      *ctx,
        struct xlog_cil_trans_hdr *hdr,
        struct xfs_log_vec      *lvhdr,
        int                     num_iovecs)
{
        struct xlog_ticket      *tic = ctx->ticket;
        __be32                  tid = cpu_to_be32(tic->t_tid);

        memset(hdr, 0, sizeof(*hdr));

        /* Log start record */
        hdr->oph[0].oh_tid = tid;
        hdr->oph[0].oh_clientid = XFS_TRANSACTION;
        hdr->oph[0].oh_flags = XLOG_START_TRANS;

        /* log iovec region pointer */
        hdr->lhdr[0].i_addr = &hdr->oph[0];
        hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
        hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

        /* log opheader */
        hdr->oph[1].oh_tid = tid;
        hdr->oph[1].oh_clientid = XFS_TRANSACTION;
        hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

        /* transaction header in host byte order format */
        hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
        hdr->thdr.th_tid = tic->t_tid;
        hdr->thdr.th_num_items = num_iovecs;

        /* log iovec region pointer */
        hdr->lhdr[1].i_addr = &hdr->oph[1];
        hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
                        sizeof(struct xfs_trans_header);
        hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

        lvhdr->lv_niovecs = 2;
        lvhdr->lv_iovecp = &hdr->lhdr[0];
        lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

        tic->t_curr_res -= lvhdr->lv_bytes;
}

/*
 * CIL item reordering compare function. We want to order in ascending ID
 * order, but we want to leave items with the same ID in the order they were
 * added to the list. This is important for operations like reflink where we
 * log 4 order dependent intents in a single transaction when we overwrite an
 * existing shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI(inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
        void                    *priv,
        const struct list_head  *a,
        const struct list_head  *b)
{
        struct xfs_log_vec      *l1 = container_of(a, struct xfs_log_vec, lv_list);
        struct xfs_log_vec      *l2 = container_of(b, struct xfs_log_vec, lv_list);

        return l1->lv_order_id > l2->lv_order_id;
}

/*
 * Pull all the log vectors off the items in the CIL, and remove the items
 * from the CIL. We don't need the CIL lock here because it's only needed on
 * the transaction commit side which is currently locked out by the flush
 * lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
        struct xfs_cil_ctx      *ctx,
        struct list_head        *whiteouts,
        uint32_t                *num_iovecs,
        uint32_t                *num_bytes)
{
        while (!list_empty(&ctx->log_items)) {
                struct xfs_log_item     *item;
                struct xfs_log_vec      *lv;

                item = list_first_entry(&ctx->log_items,
                                        struct xfs_log_item, li_cil);

                if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
                        list_move(&item->li_cil, whiteouts);
                        trace_xfs_cil_whiteout_skip(item);
                        continue;
                }

                lv = item->li_lv;
                lv->lv_order_id = item->li_order_id;

                /* we don't write ordered log vectors */
                if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                        *num_bytes += lv->lv_bytes;
                *num_iovecs += lv->lv_niovecs;
                list_add_tail(&lv->lv_list, &ctx->lv_chain);

                list_del_init(&item->li_cil);
                item->li_order_id = 0;
                item->li_lv = NULL;
        }
}

static void
xlog_cil_cleanup_whiteouts(
        struct list_head        *whiteouts)
{
        while (!list_empty(whiteouts)) {
                struct xfs_log_item *item = list_first_entry(whiteouts,
                                                struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                trace_xfs_cil_whiteout_unpin(item);
                item->li_ops->iop_unpin(item, 1);
        }
}
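
/*
 * Note for the sort in the push below: list_sort() is stable and
 * xlog_cil_order_cmp() returns 0 for equal order ids, so items logged
 * in the same transaction keep their insertion order - exactly the
 * property the reflink BUI/CUI example above depends on.
 */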

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush.
 * If xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 *
 * This runs from a workqueue so it does not inherit any specific memory
 * allocation context. However, we do not want to block on memory reclaim
 * recursing back into the filesystem because this push may have been
 * triggered by memory reclaim itself. Hence we really need to run under full
 * GFP_NOFS constraints here.
 */
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        unsigned int            nofs_flags = memalloc_nofs_save();
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, push_work);
        struct xfs_cil          *cil = ctx->cil;
        struct xlog             *log = cil->xc_log;
        struct xfs_cil_ctx      *new_ctx;
        int                     num_iovecs = 0;
        int                     num_bytes = 0;
        int                     error = 0;
        struct xlog_cil_trans_hdr thdr;
        struct xfs_log_vec      lvhdr = {};
        xfs_csn_t               push_seq;
        bool                    push_commit_stable;
        LIST_HEAD(whiteouts);
        struct xlog_ticket      *ticket;

        new_ctx = xlog_cil_ctx_alloc();
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);
        push_commit_stable = cil->xc_push_commit_stable;
        cil->xc_push_commit_stable = false;

        /*
         * As we are about to switch to a new, empty CIL context, we no longer
         * need to throttle tasks on CIL space overruns. Wake any waiters that
         * the hard push throttle may have caught so they can start committing
         * to the new context. The ctx->xc_push_lock provides the
         * serialisation necessary for safely using the lockless
         * waitqueue_active() check in this context.
         */
        if (waitqueue_active(&cil->xc_push_wait))
                wake_up_all(&cil->xc_push_wait);

        xlog_cil_push_pcp_aggregate(cil, ctx);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting
         * on this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *      the current sequence not being found on the committing list;
         *      an empty CIL; and
         *      an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the
         * CIL still dirty. In that latter case, the push has not yet started,
         * and so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the waiter makes to
         * wait on the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

        /*
         * Switch the contexts so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit
         * record, though - we need to synchronise with previous and future
         * commits so that the commit records are correctly ordered in the log
         * to ensure that we process items during log IO completion in the
         * correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_seq requires us to mirror the new sequence into the
         * cil structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        xlog_cil_ctx_switch(cil, new_ctx);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Sort the log vector chain before we add the transaction headers.
         * This ensures we always have the transaction headers at the start
         * of the chain.
         */
        list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         * Add the lvhdr to the head of the lv chain we pass to xlog_write()
         * so it gets written into the iclog first.
         */
        xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
        num_bytes += lvhdr.lv_bytes;
        list_add(&lvhdr.lv_list, &ctx->lv_chain);

        /*
         * Take the lvhdr back off the lv_chain immediately after calling
         * xlog_cil_write_chain() as it should not be passed to log IO
         * completion.
         */
        error = xlog_cil_write_chain(ctx, num_bytes);
        list_del(&lvhdr.lv_list);
        if (error)
                goto out_abort_free_ticket;

        error = xlog_cil_write_commit_record(ctx);
        if (error)
                goto out_abort_free_ticket;

        /*
         * Grab the ticket from the ctx so we can ungrant it after releasing
         * the commit_iclog. The ctx may be freed by the time we return from
         * releasing the commit_iclog (i.e. checkpoint has been completed and
         * callback run) so we can't reference the ctx after the call to
         * xlog_state_release_iclog().
         */
        ticket = ctx->ticket;

        /*
         * If the checkpoint spans multiple iclogs, wait for all previous
         * iclogs to complete before we submit the commit_iclog. We can't use
         * state checks for this - ACTIVE can be either a past completed iclog
         * or a future iclog being filled, while WANT_SYNC through SYNC_DONE
         * can be a past or future iclog awaiting IO or ordered IO completion
         * to be run. In the latter case, if it's a future iclog and we wait
         * on it, then we will hang because it won't get processed through to
         * ic_force_wait wakeup until this commit_iclog is written to disk.
         * Hence we use the iclog header lsn and compare it to the commit lsn
         * to determine if we need to wait on iclogs or not.
         */
        spin_lock(&log->l_icloglock);
        if (ctx->start_lsn != ctx->commit_lsn) {
                xfs_lsn_t       plsn;

                plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
                if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
                        /*
                         * Waiting on ic_force_wait orders the completion of
                         * iclogs older than ic_prev. Hence we only need to
                         * wait on the most recent older iclog here.
                         */
                        xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
                        spin_lock(&log->l_icloglock);
                }

                /*
                 * We need to issue a pre-flush so that the ordering for this
                 * checkpoint is correctly preserved down to stable storage.
                 */
                ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
        }

        /*
         * The commit iclog must be written to stable storage to guarantee
         * journal IO vs metadata writeback IO is correctly ordered on stable
         * storage.
         *
         * If the push caller needs the commit to be immediately stable and
         * the commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to
         * indicate it will be written when released, switch its state to
         * WANT_SYNC right now.
         */
        ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
        if (push_commit_stable &&
            ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
        ticket = ctx->ticket;
        xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

        /* Not safe to reference ctx now! */

        spin_unlock(&log->l_icloglock);
        xlog_cil_cleanup_whiteouts(&whiteouts);
        xfs_log_ticket_ungrant(log, ticket);
        memalloc_nofs_restore(nofs_flags);
        return;

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kfree(new_ctx);
        memalloc_nofs_restore(nofs_flags);
        return;

out_abort_free_ticket:
        ASSERT(xlog_is_shutdown(log));
        xlog_cil_cleanup_whiteouts(&whiteouts);
        if (!ctx->commit_iclog) {
                xfs_log_ticket_ungrant(log, ctx->ticket);
                xlog_cil_committed(ctx);
                memalloc_nofs_restore(nofs_flags);
                return;
        }
        spin_lock(&log->l_icloglock);
        ticket = ctx->ticket;
        xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
        /* Not safe to reference ctx now! */
        spin_unlock(&log->l_icloglock);
        xfs_log_ticket_ungrant(log, ticket);
        memalloc_nofs_restore(nofs_flags);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit
 * in the log. The limit really is that a checkpoint can't be more than half
 * the log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log) __releases(cil->xc_ctx_lock)
{
        struct xfs_cil  *cil = log->l_cilp;
        int             space_used = atomic_read(&cil->xc_ctx->space_used);

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

        /*
         * We are done if:
         * - we haven't used up all the space available yet; or
         * - we've already queued up a push; and
         * - we're not over the hard limit; and
         * - nothing has been over the hard limit.
         *
         * If so, we don't need to take the push lock as there's nothing to
         * do.
         */
        if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
            (cil->xc_push_seq == cil->xc_current_sequence &&
             space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
             !waitqueue_active(&cil->xc_push_wait))) {
                up_read(&cil->xc_ctx_lock);
                return;
        }

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        }

        /*
         * Drop the context lock now, we can't hold that if we need to sleep
         * because we are over the blocking threshold. The push_lock is still
         * held, so blocking threshold sleep/wakeup is still correctly
         * serialised here.
         */
        up_read(&cil->xc_ctx_lock);

        /*
         * If we are well over the space limit, throttle the work that is
         * being done until the push work on this context has begun. Enforce
         * the hard throttle on all transaction commits once it has been
         * activated, even if the committing transactions have resulted in the
         * space usage dipping back down under the hard limit.
         *
         * The ctx->xc_push_lock provides the serialisation necessary for
         * safely calling xlog_cil_over_hard_limit() in this context.
         */
        if (xlog_cil_over_hard_limit(log, space_used)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(space_used < log->l_logsize);
                xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }

        spin_unlock(&cil->xc_push_lock);
}
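
/*
 * Threshold behaviour of the background push above, illustratively:
 * commits stream into the CIL freely below XLOG_CIL_SPACE_LIMIT(),
 * queue the push work (without blocking) between that and
 * XLOG_CIL_BLOCKING_SPACE_LIMIT(), and sleep on xc_push_wait only once
 * the blocking limit has been crossed, until the queued push switches
 * contexts and wakes them.
 */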

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the
 * sequence number that is passed. When it returns, the work will be queued
 * for @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the
 * workqueue to get previously queued work moving to minimise the wait time
 * they will undergo waiting for all outstanding pushes to complete. The
 * caller is expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If
 * we don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq,
        bool            async)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        if (!async)
                flush_workqueue(cil->xc_push_wq);

        spin_lock(&cil->xc_push_lock);

        /*
         * If this is an async flush request, we always need to set the
         * xc_push_commit_stable flag even if something else has already
         * queued a push. The flush caller is asking for the CIL to be on
         * stable storage when the next push completes, so regardless of who
         * has queued the push, the flush requires stable semantics from it.
         */
        cil->xc_push_commit_stable = async;

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no more work that we need to do.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
            push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}

/*
 * If there are intent done items in this transaction and the related intent
 * was committed in the current (same) CIL checkpoint, we don't need to write
 * either the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items
 * from the CIL here, mark the related intent with a whiteout so that the CIL
 * push can remove it rather than writing it to the journal. Then remove the
 * intent done item from the current transaction and release it so it doesn't
 * get put into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
        struct xfs_cil          *cil,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip, *ilip, *next;
        uint32_t                len = 0;

        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
                        continue;

                ilip = lip->li_ops->iop_intent(lip);
                if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
                        continue;
                set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
                trace_xfs_cil_whiteout_mark(ilip);
                len += ilip->li_lv->lv_bytes;
                kvfree(ilip->li_lv);
                ilip->li_lv = NULL;

                xfs_trans_del_item(lip);
                lip->li_ops->iop_release(lip);
        }
        return len;
}
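
/*
 * Example of the whiteout optimisation above: a transaction logging an
 * EFD whose matching EFI was committed into the current checkpoint
 * marks the EFI with XFS_LI_WHITEOUT and releases the EFD here. The
 * push later drops the whiteout-marked EFI instead of writing it, so
 * neither half of the intent pair consumes space in this checkpoint.
 */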

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
        struct xlog             *log,
        struct xfs_trans        *tp,
        xfs_csn_t               *commit_seq,
        bool                    regrant)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_item     *lip, *next;
        uint32_t                released_space = 0;

        /*
         * Do all necessary memory allocation before we lock the CIL.
         * This ensures the allocation does not deadlock with a CIL
         * push in memory reclaim (e.g. from kswapd).
         */
        xlog_cil_alloc_shadow_bufs(log, tp);

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
                released_space = xlog_cil_process_intents(cil, tp);

        xlog_cil_insert_items(log, tp, released_space);

        if (regrant && !xlog_is_shutdown(log))
                xfs_log_ticket_regrant(log, tp->t_ticket);
        else
                xfs_log_ticket_ungrant(log, tp->t_ticket);
        tp->t_ticket = NULL;
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and possibly freed.
         *
         * This needs to be done before we drop the CIL context lock because
         * we have to update state in the log items and unlock them before
         * they go to disk. If we don't, then the CIL checkpoint can race with
         * us and we can run checkpoint completion before we've updated and
         * unlocked the log items. This affects (at least) processing of stale
         * buffers, inodes and EFIs.
         */
        trace_xfs_trans_commit_items(tp, _RET_IP_);
        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
                xfs_trans_del_item(lip);
                if (lip->li_ops->iop_committing)
                        lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
        }
        if (commit_seq)
                *commit_seq = cil->xc_ctx->sequence;

        /* xlog_cil_push_background() releases cil->xc_ctx_lock */
        xlog_cil_push_background(log);
}

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the
 * disk, but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
        struct xlog     *log)
{
        xfs_csn_t       seq = log->l_cilp->xc_current_sequence;

        trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
        xlog_cil_push_now(log, seq, true);

        /*
         * If the CIL is empty, make sure that any previous checkpoint that
         * may still be in an active iclog is pushed to stable storage.
         */
        if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
                xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number
 * given. Hence the only time we will trigger a push here is if the push
 * sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
        struct xlog     *log,
        xfs_csn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        if (!sequence)
                sequence = cil->xc_current_sequence;
        trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence, false);

        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by
                 * the shutdown, but then went back to sleep once already in
                 * the shutdown state.
                 */
                if (xlog_is_shutdown(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }

        /*
         * The call to xlog_cil_push_now() executes the push in the
         * background. Hence by the time we have got here it is possible that
         * our sequence may not have been pushed yet. This is true if the
         * current sequence still matches the push sequence after the above
         * wait loop and the CIL still contains dirty objects. This is
         * guaranteed by the push code first adding the context to the
         * committing list before emptying the CIL.
         *
         * Hence if we don't find the context in the committing list and the
         * current sequence number is unchanged then the CIL contents are
         * significant. If the CIL is empty, it means there was nothing to
         * push and that means there is nothing to wait for. If the CIL is not
         * empty, it means we haven't yet started the push, because if it had
         * started we would have found the context on the committing list.
         */
        if (sequence == cil->xc_current_sequence &&
            !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log
         * force to pass through its iclog state machine error handling, even
         * though we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;
        struct xlog_cil_pcp *cilpcp;
        int             cpu;

        cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!cil)
                return -ENOMEM;
        /*
         * Limit the CIL pipeline depth to 4 concurrent works to bound the
         * concurrency the log spinlocks will be exposed to.
         */
        cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
                        XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
                        4, log->l_mp->m_super->s_id);
        if (!cil->xc_push_wq)
                goto out_destroy_cil;

        cil->xc_log = log;
        cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
        if (!cil->xc_pcp)
                goto out_destroy_wq;

        for_each_possible_cpu(cpu) {
                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
                INIT_LIST_HEAD(&cilpcp->busy_extents);
                INIT_LIST_HEAD(&cilpcp->log_items);
        }

        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_push_lock);
        init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_start_wait);
        init_waitqueue_head(&cil->xc_commit_wait);
        log->l_cilp = cil;

        ctx = xlog_cil_ctx_alloc();
        xlog_cil_ctx_switch(cil, ctx);
        return 0;

out_destroy_wq:
        destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
        kfree(cil);
        return -ENOMEM;
}

void
xlog_cil_destroy(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (cil->xc_ctx) {
                if (cil->xc_ctx->ticket)
                        xfs_log_ticket_put(cil->xc_ctx->ticket);
                kfree(cil->xc_ctx);
        }

        ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
        free_percpu(cil->xc_pcp);
        destroy_workqueue(cil->xc_push_wq);
        kfree(cil);
}