Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
   4 * Written by Alex Tomas <alex@clusterfs.com>
   5 */
   6
   7
   8/*
   9 * mballoc.c contains the multiblocks allocation routines
  10 */
  11
  12#include "ext4_jbd2.h"
  13#include "mballoc.h"
  14#include <linux/log2.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/nospec.h>
  18#include <linux/backing-dev.h>
  19#include <linux/freezer.h>
  20#include <trace/events/ext4.h>
  21#include <kunit/static_stub.h>
  22
  23/*
  24 * MUSTDO:
  25 *   - test ext4_ext_search_left() and ext4_ext_search_right()
  26 *   - search for metadata in few groups
  27 *
  28 * TODO v4:
  29 *   - normalization should take into account whether file is still open
  30 *   - discard preallocations if no free space left (policy?)
  31 *   - don't normalize tails
  32 *   - quota
  33 *   - reservation for superuser
  34 *
  35 * TODO v3:
  36 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
  37 *   - track min/max extents in each group for better group selection
  38 *   - mb_mark_used() may allocate chunk right after splitting buddy
  39 *   - tree of groups sorted by number of free blocks
  40 *   - error handling
  41 */
  42
  43/*
  44 * The allocation request involve request for multiple number of blocks
  45 * near to the goal(block) value specified.
  46 *
  47 * During initialization phase of the allocator we decide to use the
  48 * group preallocation or inode preallocation depending on the size of
  49 * the file. The size of the file could be the resulting file size we
  50 * would have after allocation, or the current file size, which ever
  51 * is larger. If the size is less than sbi->s_mb_stream_request we
  52 * select to use the group preallocation. The default value of
  53 * s_mb_stream_request is 16 blocks. This can also be tuned via
  54 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
  55 * terms of number of blocks.
  56 *
  57 * The main motivation for having small file use group preallocation is to
  58 * ensure that we have small files closer together on the disk.
  59 *
  60 * First stage the allocator looks at the inode prealloc list,
  61 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
  62 * spaces for this particular inode. The inode prealloc space is
  63 * represented as:
  64 *
  65 * pa_lstart -> the logical start block for this prealloc space
  66 * pa_pstart -> the physical start block for this prealloc space
  67 * pa_len    -> length for this prealloc space (in clusters)
  68 * pa_free   ->  free space available in this prealloc space (in clusters)
  69 *
  70 * The inode preallocation space is used looking at the _logical_ start
  71 * block. If only the logical file block falls within the range of prealloc
  72 * space we will consume the particular prealloc space. This makes sure that
  73 * we have contiguous physical blocks representing the file blocks
  74 *
  75 * The important thing to be noted in case of inode prealloc space is that
  76 * we don't modify the values associated to inode prealloc space except
  77 * pa_free.
  78 *
  79 * If we are not able to find blocks in the inode prealloc space and if we
  80 * have the group allocation flag set then we look at the locality group
  81 * prealloc space. These are per CPU prealloc list represented as
  82 *
  83 * ext4_sb_info.s_locality_groups[smp_processor_id()]
  84 *
  85 * The reason for having a per cpu locality group is to reduce the contention
  86 * between CPUs. It is possible to get scheduled at this point.
  87 *
  88 * The locality group prealloc space is used looking at whether we have
  89 * enough free space (pa_free) within the prealloc space.
  90 *
  91 * If we can't allocate blocks via inode prealloc or/and locality group
  92 * prealloc then we look at the buddy cache. The buddy cache is represented
  93 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
  94 * mapped to the buddy and bitmap information regarding different
  95 * groups. The buddy information is attached to buddy cache inode so that
  96 * we can access them through the page cache. The information regarding
  97 * each group is loaded via ext4_mb_load_buddy.  The information involve
  98 * block bitmap and buddy information. The information are stored in the
  99 * inode as:
 100 *
 101 *  {                        page                        }
 102 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 103 *
 104 *
 105 * one block each for bitmap and buddy information.  So for each group we
 106 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 107 * blocksize) blocks.  So it can have information regarding groups_per_page
 108 * which is blocks_per_page/2
 109 *
 110 * The buddy cache inode is not stored on disk. The inode is thrown
 111 * away when the filesystem is unmounted.
 112 *
 113 * We look for count number of blocks in the buddy cache. If we were able
 114 * to locate that many free blocks we return with additional information
 115 * regarding rest of the contiguous physical block available
 116 *
 117 * Before allocating blocks via buddy cache we normalize the request
 118 * blocks. This ensure we ask for more blocks that we needed. The extra
 119 * blocks that we get after allocation is added to the respective prealloc
 120 * list. In case of inode preallocation we follow a list of heuristics
 121 * based on file size. This can be found in ext4_mb_normalize_request. If
 122 * we are doing a group prealloc we try to normalize the request to
 123 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 124 * dependent on the cluster size; for non-bigalloc file systems, it is
 125 * 512 blocks. This can be tuned via
 126 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 127 * terms of number of blocks. If we have mounted the file system with -O
 128 * stripe=<value> option the group prealloc request is normalized to the
 129 * smallest multiple of the stripe value (sbi->s_stripe) which is
 130 * greater than the default mb_group_prealloc.
 131 *
 132 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 133 * structures in two data structures:
 134 *
 135 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 136 *
 137 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 138 *
 139 *    This is an array of lists where the index in the array represents the
 140 *    largest free order in the buddy bitmap of the participating group infos of
 141 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 142 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 143 *    placed in appropriate lists.
 144 *
 145 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 146 *
 147 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 148 *
 149 *    This is an array of lists where in the i-th list there are groups with
 150 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 151 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 152 *    Note that we don't bother with a special list for completely empty groups
 153 *    so we only have MB_NUM_ORDERS(sb) lists.
 154 *
 155 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 156 * structures to decide the order in which groups are to be traversed for
 157 * fulfilling an allocation request.
 158 *
 159 * At CR_POWER2_ALIGNED , we look for groups which have the largest_free_order
 160 * >= the order of the request. We directly look at the largest free order list
 161 * in the data structure (1) above where largest_free_order = order of the
 162 * request. If that list is empty, we look at remaining list in the increasing
 163 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
 164 * lookup in O(1) time.
 165 *
 166 * At CR_GOAL_LEN_FAST, we only consider groups where
 167 * average fragment size > request size. So, we lookup a group which has average
 168 * fragment size just above or equal to request size using our average fragment
 169 * size group lists (data structure 2) in O(1) time.
 170 *
 171 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 172 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 173 * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
 174 * fragment size > goal length. So before falling to the slower
 175 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim goal length and
 176 * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big
 177 * enough average fragment size. This increases the chances of finding a
 178 * suitable block group in O(1) time and results in faster allocation at the
 179 * cost of reduced size of allocation.
 180 *
 181 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 182 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
 183 * CR_GOAL_LEN_FAST phase.
 184 *
 185 * The regular allocator (using the buddy cache) supports a few tunables.
 186 *
 187 * /sys/fs/ext4/<partition>/mb_min_to_scan
 188 * /sys/fs/ext4/<partition>/mb_max_to_scan
 189 * /sys/fs/ext4/<partition>/mb_order2_req
 190 * /sys/fs/ext4/<partition>/mb_linear_limit
 191 *
 192 * The regular allocator uses buddy scan only if the request len is power of
 193 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 194 * value of s_mb_order2_reqs can be tuned via
 195 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 196 * stripe size (sbi->s_stripe), we try to search for contiguous block in
 197 * stripe size. This should result in better allocation on RAID setups. If
 198 * not, we search in the specific group using bitmap for best extents. The
 199 * tunable min_to_scan and max_to_scan control the behaviour here.
 200 * min_to_scan indicate how long the mballoc __must__ look for a best
 201 * extent and max_to_scan indicates how long the mballoc __can__ look for a
 202 * best extent in the found extents. Searching for the blocks starts with
 203 * the group specified as the goal value in allocation context via
 204 * ac_g_ex. Each group is first checked based on the criteria whether it
 205 * can be used for allocation. ext4_mb_good_group explains how the groups are
 206 * checked.
 207 *
 208 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 209 * get traversed linearly. That may result in subsequent allocations being not
 210 * close to each other. And so, the underlying device may get filled up in a
 211 * non-linear fashion. While that may not matter on non-rotational devices, for
 212 * rotational devices that may result in higher seek times. "mb_linear_limit"
 213 * tells mballoc how many groups mballoc should search linearly before
 214 * performing consulting above data structures for more efficient lookups. For
 215 * non rotational devices, this value defaults to 0 and for rotational devices
 216 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 217 *
 218 * Both the prealloc space are getting populated as above. So for the first
 219 * request we will hit the buddy cache which will result in this prealloc
 220 * space getting filled. The prealloc space is then later used for the
 221 * subsequent request.
 222 */
 223
 224/*
 225 * mballoc operates on the following data:
 226 *  - on-disk bitmap
 227 *  - in-core buddy (actually includes buddy and bitmap)
 228 *  - preallocation descriptors (PAs)
 229 *
 230 * there are two types of preallocations:
 231 *  - inode
 232 *    assiged to specific inode and can be used for this inode only.
 233 *    it describes part of inode's space preallocated to specific
 234 *    physical blocks. any block from that preallocated can be used
 235 *    independent. the descriptor just tracks number of blocks left
 236 *    unused. so, before taking some block from descriptor, one must
 237 *    make sure corresponded logical block isn't allocated yet. this
 238 *    also means that freeing any block within descriptor's range
 239 *    must discard all preallocated blocks.
 240 *  - locality group
 241 *    assigned to specific locality group which does not translate to
 242 *    permanent set of inodes: inode can join and leave group. space
 243 *    from this type of preallocation can be used for any inode. thus
 244 *    it's consumed from the beginning to the end.
 245 *
 246 * relation between them can be expressed as:
 247 *    in-core buddy = on-disk bitmap + preallocation descriptors
 248 *
 249 * this mean blocks mballoc considers used are:
 250 *  - allocated blocks (persistent)
 251 *  - preallocated blocks (non-persistent)
 252 *
 253 * consistency in mballoc world means that at any time a block is either
 254 * free or used in ALL structures. notice: "any time" should not be read
 255 * literally -- time is discrete and delimited by locks.
 256 *
 257 *  to keep it simple, we don't use block numbers, instead we count number of
 258 *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 259 *
 260 * all operations can be expressed as:
 261 *  - init buddy:			buddy = on-disk + PAs
 262 *  - new PA:				buddy += N; PA = N
 263 *  - use inode PA:			on-disk += N; PA -= N
 264 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 265 *  - use locality group PA		on-disk += N; PA -= N
 266 *  - discard locality group PA		buddy -= PA; PA = 0
 267 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 268 *        is used in real operation because we can't know actual used
 269 *        bits from PA, only from on-disk bitmap
 270 *
 271 * if we follow this strict logic, then all operations above should be atomic.
 272 * given some of them can block, we'd have to use something like semaphores
 273 * killing performance on high-end SMP hardware. let's try to relax it using
 274 * the following knowledge:
 275 *  1) if buddy is referenced, it's already initialized
 276 *  2) while block is used in buddy and the buddy is referenced,
 277 *     nobody can re-allocate that block
 278 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 279 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 280 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
 281 *     block
 282 *
 283 * so, now we're building a concurrency table:
 284 *  - init buddy vs.
 285 *    - new PA
 286 *      blocks for PA are allocated in the buddy, buddy must be referenced
 287 *      until PA is linked to allocation group to avoid concurrent buddy init
 288 *    - use inode PA
 289 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 290 *      given (3) we care that PA-=N operation doesn't interfere with init
 291 *    - discard inode PA
 292 *      the simplest way would be to have buddy initialized by the discard
 293 *    - use locality group PA
 294 *      again PA-=N must be serialized with init
 295 *    - discard locality group PA
 296 *      the simplest way would be to have buddy initialized by the discard
 297 *  - new PA vs.
 298 *    - use inode PA
 299 *      i_data_sem serializes them
 300 *    - discard inode PA
 301 *      discard process must wait until PA isn't used by another process
 302 *    - use locality group PA
 303 *      some mutex should serialize them
 304 *    - discard locality group PA
 305 *      discard process must wait until PA isn't used by another process
 306 *  - use inode PA
 307 *    - use inode PA
 308 *      i_data_sem or another mutex should serializes them
 309 *    - discard inode PA
 310 *      discard process must wait until PA isn't used by another process
 311 *    - use locality group PA
 312 *      nothing wrong here -- they're different PAs covering different blocks
 313 *    - discard locality group PA
 314 *      discard process must wait until PA isn't used by another process
 315 *
 316 * now we're ready to make few consequences:
 317 *  - PA is referenced and while it is no discard is possible
 318 *  - PA is referenced until block isn't marked in on-disk bitmap
 319 *  - PA changes only after on-disk bitmap
 320 *  - discard must not compete with init. either init is done before
 321 *    any discard or they're serialized somehow
 322 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 323 *
 324 * a special case when we've used PA to emptiness. no need to modify buddy
 325 * in this case, but we should care about concurrent init
 326 *
 327 */
 328
 329 /*
 330 * Logic in few words:
 331 *
 332 *  - allocation:
 333 *    load group
 334 *    find blocks
 335 *    mark bits in on-disk bitmap
 336 *    release group
 337 *
 338 *  - use preallocation:
 339 *    find proper PA (per-inode or group)
 340 *    load group
 341 *    mark bits in on-disk bitmap
 342 *    release group
 343 *    release PA
 344 *
 345 *  - free:
 346 *    load group
 347 *    mark bits in on-disk bitmap
 348 *    release group
 349 *
 350 *  - discard preallocations in group:
 351 *    mark PAs deleted
 352 *    move them onto local list
 353 *    load on-disk bitmap
 354 *    load group
 355 *    remove PA from object (inode or locality group)
 356 *    mark free blocks in-core
 357 *
 358 *  - discard inode's preallocations:
 359 */
 360
 361/*
 362 * Locking rules
 363 *
 364 * Locks:
 365 *  - bitlock on a group	(group)
 366 *  - object (inode/locality)	(object)
 367 *  - per-pa lock		(pa)
 368 *  - cr_power2_aligned lists lock	(cr_power2_aligned)
 369 *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
 370 *
 371 * Paths:
 372 *  - new pa
 373 *    object
 374 *    group
 375 *
 376 *  - find and use pa:
 377 *    pa
 378 *
 379 *  - release consumed pa:
 380 *    pa
 381 *    group
 382 *    object
 383 *
 384 *  - generate in-core bitmap:
 385 *    group
 386 *        pa
 387 *
 388 *  - discard all for given object (inode, locality group):
 389 *    object
 390 *        pa
 391 *    group
 392 *
 393 *  - discard all for given group:
 394 *    group
 395 *        pa
 396 *    group
 397 *        object
 398 *
 399 *  - allocation path (ext4_mb_regular_allocator)
 400 *    group
 401 *    cr_power2_aligned/cr_goal_len_fast
 402 */
 403static struct kmem_cache *ext4_pspace_cachep;
 404static struct kmem_cache *ext4_ac_cachep;
 405static struct kmem_cache *ext4_free_data_cachep;
 406
 407/* We create slab caches for groupinfo data structures based on the
 408 * superblock block size.  There will be one per mounted filesystem for
 409 * each unique s_blocksize_bits */
 410#define NR_GRPINFO_CACHES 8
 411static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 412
 413static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
 414	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
 415	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
 416	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
 417};
 418
 419static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 420					ext4_group_t group);
 421static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
 422
 423static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 424			       ext4_group_t group, enum criteria cr);
 425
 426static int ext4_try_to_trim_range(struct super_block *sb,
 427		struct ext4_buddy *e4b, ext4_grpblk_t start,
 428		ext4_grpblk_t max, ext4_grpblk_t minblocks);
 429
 430/*
 431 * The algorithm using this percpu seq counter goes below:
 432 * 1. We sample the percpu discard_pa_seq counter before trying for block
 433 *    allocation in ext4_mb_new_blocks().
 434 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 435 *    or free these blocks i.e. while marking those blocks as used/free in
 436 *    mb_mark_used()/mb_free_blocks().
 437 * 3. We also increment this percpu seq counter when we successfully identify
 438 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 439 *    of those PAs inside ext4_mb_discard_group_preallocations().
 440 *
 441 * Now to make sure that the regular fast path of block allocation is not
 442 * affected, as a small optimization we only sample the percpu seq counter
 443 * on that cpu. Only when the block allocation fails and when freed blocks
 444 * found were 0, that is when we sample percpu seq counter for all cpus using
 445 * below function ext4_get_discard_pa_seq_sum(). This happens after making
 446 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 447 */
 448static DEFINE_PER_CPU(u64, discard_pa_seq);
 449static inline u64 ext4_get_discard_pa_seq_sum(void)
 450{
 451	int __cpu;
 452	u64 __seq = 0;
 453
 454	for_each_possible_cpu(__cpu)
 455		__seq += per_cpu(discard_pa_seq, __cpu);
 456	return __seq;
 457}
 458
 459static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 460{
 461#if BITS_PER_LONG == 64
 462	*bit += ((unsigned long) addr & 7UL) << 3;
 463	addr = (void *) ((unsigned long) addr & ~7UL);
 464#elif BITS_PER_LONG == 32
 465	*bit += ((unsigned long) addr & 3UL) << 3;
 466	addr = (void *) ((unsigned long) addr & ~3UL);
 467#else
 468#error "how many bits you are?!"
 469#endif
 470	return addr;
 471}
 472
 473static inline int mb_test_bit(int bit, void *addr)
 474{
 475	/*
 476	 * ext4_test_bit on architecture like powerpc
 477	 * needs unsigned long aligned address
 478	 */
 479	addr = mb_correct_addr_and_bit(&bit, addr);
 480	return ext4_test_bit(bit, addr);
 481}
 482
 483static inline void mb_set_bit(int bit, void *addr)
 484{
 485	addr = mb_correct_addr_and_bit(&bit, addr);
 486	ext4_set_bit(bit, addr);
 487}
 488
 489static inline void mb_clear_bit(int bit, void *addr)
 490{
 491	addr = mb_correct_addr_and_bit(&bit, addr);
 492	ext4_clear_bit(bit, addr);
 493}
 494
 495static inline int mb_test_and_clear_bit(int bit, void *addr)
 496{
 497	addr = mb_correct_addr_and_bit(&bit, addr);
 498	return ext4_test_and_clear_bit(bit, addr);
 499}
 500
 501static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 502{
 503	int fix = 0, ret, tmpmax;
 504	addr = mb_correct_addr_and_bit(&fix, addr);
 505	tmpmax = max + fix;
 506	start += fix;
 507
 508	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
 509	if (ret > max)
 510		return max;
 511	return ret;
 512}
 513
 514static inline int mb_find_next_bit(void *addr, int max, int start)
 515{
 516	int fix = 0, ret, tmpmax;
 517	addr = mb_correct_addr_and_bit(&fix, addr);
 518	tmpmax = max + fix;
 519	start += fix;
 520
 521	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
 522	if (ret > max)
 523		return max;
 524	return ret;
 525}
 526
 527static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
 528{
 529	char *bb;
 530
 531	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
 532	BUG_ON(max == NULL);
 533
 534	if (order > e4b->bd_blkbits + 1) {
 535		*max = 0;
 536		return NULL;
 537	}
 538
 539	/* at order 0 we see each particular block */
 540	if (order == 0) {
 541		*max = 1 << (e4b->bd_blkbits + 3);
 542		return e4b->bd_bitmap;
 543	}
 544
 545	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
 546	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
 547
 548	return bb;
 549}
 550
 551#ifdef DOUBLE_CHECK
 552static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 553			   int first, int count)
 554{
 555	int i;
 556	struct super_block *sb = e4b->bd_sb;
 557
 558	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 559		return;
 560	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
 561	for (i = 0; i < count; i++) {
 562		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 563			ext4_fsblk_t blocknr;
 564
 565			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 566			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
 567			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
 568					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
 569			ext4_grp_locked_error(sb, e4b->bd_group,
 570					      inode ? inode->i_ino : 0,
 571					      blocknr,
 572					      "freeing block already freed "
 573					      "(bit %u)",
 574					      first + i);
 575		}
 576		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
 577	}
 578}
 579
 580static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
 581{
 582	int i;
 583
 584	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 585		return;
 586	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
 587	for (i = 0; i < count; i++) {
 588		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
 589		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
 590	}
 591}
 592
 593static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 594{
 595	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 596		return;
 597	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
 598		unsigned char *b1, *b2;
 599		int i;
 600		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
 601		b2 = (unsigned char *) bitmap;
 602		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 603			if (b1[i] != b2[i]) {
 604				ext4_msg(e4b->bd_sb, KERN_ERR,
 605					 "corruption in group %u "
 606					 "at byte %u(%u): %x in copy != %x "
 607					 "on disk/prealloc",
 608					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
 609				BUG();
 610			}
 611		}
 612	}
 613}
 614
 615static void mb_group_bb_bitmap_alloc(struct super_block *sb,
 616			struct ext4_group_info *grp, ext4_group_t group)
 617{
 618	struct buffer_head *bh;
 619
 620	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
 621	if (!grp->bb_bitmap)
 622		return;
 623
 624	bh = ext4_read_block_bitmap(sb, group);
 625	if (IS_ERR_OR_NULL(bh)) {
 626		kfree(grp->bb_bitmap);
 627		grp->bb_bitmap = NULL;
 628		return;
 629	}
 630
 631	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
 632	put_bh(bh);
 633}
 634
 635static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 636{
 637	kfree(grp->bb_bitmap);
 638}
 639
 640#else
 641static inline void mb_free_blocks_double(struct inode *inode,
 642				struct ext4_buddy *e4b, int first, int count)
 643{
 644	return;
 645}
 646static inline void mb_mark_used_double(struct ext4_buddy *e4b,
 647						int first, int count)
 648{
 649	return;
 650}
 651static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 652{
 653	return;
 654}
 655
 656static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
 657			struct ext4_group_info *grp, ext4_group_t group)
 658{
 659	return;
 660}
 661
 662static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 663{
 664	return;
 665}
 666#endif
 667
 668#ifdef AGGRESSIVE_CHECK
 669
 670#define MB_CHECK_ASSERT(assert)						\
 671do {									\
 672	if (!(assert)) {						\
 673		printk(KERN_EMERG					\
 674			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
 675			function, file, line, # assert);		\
 676		BUG();							\
 677	}								\
 678} while (0)
 679
 680static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 681				const char *function, int line)
 682{
 683	struct super_block *sb = e4b->bd_sb;
 684	int order = e4b->bd_blkbits + 1;
 685	int max;
 686	int max2;
 687	int i;
 688	int j;
 689	int k;
 690	int count;
 691	struct ext4_group_info *grp;
 692	int fragments = 0;
 693	int fstart;
 694	struct list_head *cur;
 695	void *buddy;
 696	void *buddy2;
 697
 698	if (e4b->bd_info->bb_check_counter++ % 10)
 699		return;
 700
 701	while (order > 1) {
 702		buddy = mb_find_buddy(e4b, order, &max);
 703		MB_CHECK_ASSERT(buddy);
 704		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
 705		MB_CHECK_ASSERT(buddy2);
 706		MB_CHECK_ASSERT(buddy != buddy2);
 707		MB_CHECK_ASSERT(max * 2 == max2);
 708
 709		count = 0;
 710		for (i = 0; i < max; i++) {
 711
 712			if (mb_test_bit(i, buddy)) {
 713				/* only single bit in buddy2 may be 0 */
 714				if (!mb_test_bit(i << 1, buddy2)) {
 715					MB_CHECK_ASSERT(
 716						mb_test_bit((i<<1)+1, buddy2));
 717				}
 718				continue;
 719			}
 720
 721			/* both bits in buddy2 must be 1 */
 722			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
 723			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
 724
 725			for (j = 0; j < (1 << order); j++) {
 726				k = (i * (1 << order)) + j;
 727				MB_CHECK_ASSERT(
 728					!mb_test_bit(k, e4b->bd_bitmap));
 729			}
 730			count++;
 731		}
 732		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
 733		order--;
 734	}
 735
 736	fstart = -1;
 737	buddy = mb_find_buddy(e4b, 0, &max);
 738	for (i = 0; i < max; i++) {
 739		if (!mb_test_bit(i, buddy)) {
 740			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
 741			if (fstart == -1) {
 742				fragments++;
 743				fstart = i;
 744			}
 745			continue;
 746		}
 747		fstart = -1;
 748		/* check used bits only */
 749		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
 750			buddy2 = mb_find_buddy(e4b, j, &max2);
 751			k = i >> j;
 752			MB_CHECK_ASSERT(k < max2);
 753			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
 754		}
 755	}
 756	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
 757	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 758
 759	grp = ext4_get_group_info(sb, e4b->bd_group);
 760	if (!grp)
 761		return;
 762	list_for_each(cur, &grp->bb_prealloc_list) {
 763		ext4_group_t groupnr;
 764		struct ext4_prealloc_space *pa;
 765		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
 766		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
 767		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
 768		for (i = 0; i < pa->pa_len; i++)
 769			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
 770	}
 771}
 772#undef MB_CHECK_ASSERT
 773#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
 774					__FILE__, __func__, __LINE__)
 775#else
 776#define mb_check_buddy(e4b)
 777#endif
 778
 779/*
 780 * Divide blocks started from @first with length @len into
 781 * smaller chunks with power of 2 blocks.
 782 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 783 * then increase bb_counters[] for corresponded chunk size.
 784 */
 785static void ext4_mb_mark_free_simple(struct super_block *sb,
 786				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
 787					struct ext4_group_info *grp)
 788{
 789	struct ext4_sb_info *sbi = EXT4_SB(sb);
 790	ext4_grpblk_t min;
 791	ext4_grpblk_t max;
 792	ext4_grpblk_t chunk;
 793	unsigned int border;
 794
 795	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
 796
 797	border = 2 << sb->s_blocksize_bits;
 798
 799	while (len > 0) {
 800		/* find how many blocks can be covered since this position */
 801		max = ffs(first | border) - 1;
 802
 803		/* find how many blocks of power 2 we need to mark */
 804		min = fls(len) - 1;
 805
 806		if (max < min)
 807			min = max;
 808		chunk = 1 << min;
 809
 810		/* mark multiblock chunks only */
 811		grp->bb_counters[min]++;
 812		if (min > 0)
 813			mb_clear_bit(first >> min,
 814				     buddy + sbi->s_mb_offsets[min]);
 815
 816		len -= chunk;
 817		first += chunk;
 818	}
 819}
 820
 821static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
 822{
 823	int order;
 824
 825	/*
 826	 * We don't bother with a special lists groups with only 1 block free
 827	 * extents and for completely empty groups.
 828	 */
 829	order = fls(len) - 2;
 830	if (order < 0)
 831		return 0;
 832	if (order == MB_NUM_ORDERS(sb))
 833		order--;
 
 
 834	return order;
 835}
 836
 837/* Move group to appropriate avg_fragment_size list */
 838static void
 839mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 840{
 841	struct ext4_sb_info *sbi = EXT4_SB(sb);
 842	int new_order;
 843
 844	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
 845		return;
 846
 847	new_order = mb_avg_fragment_size_order(sb,
 848					grp->bb_free / grp->bb_fragments);
 849	if (new_order == grp->bb_avg_fragment_size_order)
 850		return;
 851
 852	if (grp->bb_avg_fragment_size_order != -1) {
 853		write_lock(&sbi->s_mb_avg_fragment_size_locks[
 854					grp->bb_avg_fragment_size_order]);
 855		list_del(&grp->bb_avg_fragment_size_node);
 856		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 857					grp->bb_avg_fragment_size_order]);
 858	}
 859	grp->bb_avg_fragment_size_order = new_order;
 860	write_lock(&sbi->s_mb_avg_fragment_size_locks[
 861					grp->bb_avg_fragment_size_order]);
 862	list_add_tail(&grp->bb_avg_fragment_size_node,
 863		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
 864	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 865					grp->bb_avg_fragment_size_order]);
 866}
 867
 868/*
 869 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 870 * cr level needs an update.
 871 */
 872static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
 873			enum criteria *new_cr, ext4_group_t *group)
 874{
 875	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 876	struct ext4_group_info *iter;
 877	int i;
 878
 879	if (ac->ac_status == AC_STATUS_FOUND)
 880		return;
 881
 882	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
 883		atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
 884
 885	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 886		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
 887			continue;
 888		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
 889		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
 890			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 891			continue;
 892		}
 893		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
 894				    bb_largest_free_order_node) {
 895			if (sbi->s_mb_stats)
 896				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
 897			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
 898				*group = iter->bb_group;
 899				ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
 900				read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 901				return;
 902			}
 903		}
 904		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 905	}
 906
 907	/* Increment cr and search again if no group is found */
 908	*new_cr = CR_GOAL_LEN_FAST;
 909}
 910
 911/*
 912 * Find a suitable group of given order from the average fragments list.
 913 */
 914static struct ext4_group_info *
 915ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
 916{
 917	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 918	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
 919	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
 920	struct ext4_group_info *grp = NULL, *iter;
 921	enum criteria cr = ac->ac_criteria;
 922
 923	if (list_empty(frag_list))
 924		return NULL;
 925	read_lock(frag_list_lock);
 926	if (list_empty(frag_list)) {
 927		read_unlock(frag_list_lock);
 928		return NULL;
 929	}
 930	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
 931		if (sbi->s_mb_stats)
 932			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
 933		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
 934			grp = iter;
 935			break;
 936		}
 937	}
 938	read_unlock(frag_list_lock);
 939	return grp;
 940}
 941
 942/*
 943 * Choose next group by traversing average fragment size list of suitable
 944 * order. Updates *new_cr if cr level needs an update.
 945 */
 946static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
 947		enum criteria *new_cr, ext4_group_t *group)
 948{
 949	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 950	struct ext4_group_info *grp = NULL;
 951	int i;
 952
 953	if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
 954		if (sbi->s_mb_stats)
 955			atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
 956	}
 957
 958	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
 959	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 960		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
 961		if (grp) {
 962			*group = grp->bb_group;
 963			ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
 964			return;
 965		}
 966	}
 967
 968	/*
 969	 * CR_BEST_AVAIL_LEN works based on the concept that we have
 970	 * a larger normalized goal len request which can be trimmed to
 971	 * a smaller goal len such that it can still satisfy original
 972	 * request len. However, allocation request for non-regular
 973	 * files never gets normalized.
 974	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
 975	 */
 976	if (ac->ac_flags & EXT4_MB_HINT_DATA)
 977		*new_cr = CR_BEST_AVAIL_LEN;
 978	else
 979		*new_cr = CR_GOAL_LEN_SLOW;
 980}
 981
 982/*
 983 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
 984 * order we have and proactively trim the goal request length to that order to
 985 * find a suitable group faster.
 986 *
 987 * This optimizes allocation speed at the cost of slightly reduced
 988 * preallocations. However, we make sure that we don't trim the request too
 989 * much and fall to CR_GOAL_LEN_SLOW in that case.
 990 */
 991static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
 992		enum criteria *new_cr, ext4_group_t *group)
 993{
 994	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 995	struct ext4_group_info *grp = NULL;
 996	int i, order, min_order;
 997	unsigned long num_stripe_clusters = 0;
 998
 999	if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
1000		if (sbi->s_mb_stats)
1001			atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
1002	}
1003
1004	/*
1005	 * mb_avg_fragment_size_order() returns order in a way that makes
1006	 * retrieving back the length using (1 << order) inaccurate. Hence, use
1007	 * fls() instead since we need to know the actual length while modifying
1008	 * goal length.
1009	 */
1010	order = fls(ac->ac_g_ex.fe_len) - 1;
 
 
1011	min_order = order - sbi->s_mb_best_avail_max_trim_order;
1012	if (min_order < 0)
1013		min_order = 0;
1014
1015	if (sbi->s_stripe > 0) {
1016		/*
1017		 * We are assuming that stripe size is always a multiple of
1018		 * cluster ratio otherwise __ext4_fill_super exists early.
1019		 */
1020		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
1021		if (1 << min_order < num_stripe_clusters)
1022			/*
1023			 * We consider 1 order less because later we round
1024			 * up the goal len to num_stripe_clusters
1025			 */
1026			min_order = fls(num_stripe_clusters) - 1;
1027	}
1028
1029	if (1 << min_order < ac->ac_o_ex.fe_len)
1030		min_order = fls(ac->ac_o_ex.fe_len);
1031
1032	for (i = order; i >= min_order; i--) {
1033		int frag_order;
1034		/*
1035		 * Scale down goal len to make sure we find something
1036		 * in the free fragments list. Basically, reduce
1037		 * preallocations.
1038		 */
1039		ac->ac_g_ex.fe_len = 1 << i;
1040
1041		if (num_stripe_clusters > 0) {
1042			/*
1043			 * Try to round up the adjusted goal length to
1044			 * stripe size (in cluster units) multiple for
1045			 * efficiency.
1046			 */
1047			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
1048						     num_stripe_clusters);
1049		}
1050
1051		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
1052							ac->ac_g_ex.fe_len);
1053
1054		grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
1055		if (grp) {
1056			*group = grp->bb_group;
1057			ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
1058			return;
1059		}
1060	}
1061
1062	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
1063	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
1064	*new_cr = CR_GOAL_LEN_SLOW;
1065}
1066
1067static inline int should_optimize_scan(struct ext4_allocation_context *ac)
1068{
1069	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1070		return 0;
1071	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
1072		return 0;
1073	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1074		return 0;
1075	return 1;
1076}
1077
1078/*
1079 * Return next linear group for allocation. If linear traversal should not be
1080 * performed, this function just returns the same group
1081 */
1082static ext4_group_t
1083next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
1084		  ext4_group_t ngroups)
1085{
1086	if (!should_optimize_scan(ac))
1087		goto inc_and_return;
1088
1089	if (ac->ac_groups_linear_remaining) {
1090		ac->ac_groups_linear_remaining--;
1091		goto inc_and_return;
1092	}
1093
1094	return group;
1095inc_and_return:
1096	/*
1097	 * Artificially restricted ngroups for non-extent
1098	 * files makes group > ngroups possible on first loop.
1099	 */
1100	return group + 1 >= ngroups ? 0 : group + 1;
1101}
1102
1103/*
1104 * ext4_mb_choose_next_group: choose next group for allocation.
1105 *
1106 * @ac        Allocation Context
1107 * @new_cr    This is an output parameter. If the there is no good group
1108 *            available at current CR level, this field is updated to indicate
1109 *            the new cr level that should be used.
1110 * @group     This is an input / output parameter. As an input it indicates the
1111 *            next group that the allocator intends to use for allocation. As
1112 *            output, this field indicates the next group that should be used as
1113 *            determined by the optimization functions.
1114 * @ngroups   Total number of groups
1115 */
1116static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1117		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1118{
1119	*new_cr = ac->ac_criteria;
1120
1121	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1122		*group = next_linear_group(ac, *group, ngroups);
 
 
 
 
 
 
 
 
 
 
 
1123		return;
1124	}
1125
1126	if (*new_cr == CR_POWER2_ALIGNED) {
1127		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
1128	} else if (*new_cr == CR_GOAL_LEN_FAST) {
1129		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
1130	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
1131		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
1132	} else {
1133		/*
1134		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1135		 * bb_free. But until that happens, we should never come here.
 
1136		 */
1137		WARN_ON(1);
1138	}
1139}
1140
1141/*
1142 * Cache the order of the largest free extent we have available in this block
1143 * group.
1144 */
1145static void
1146mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1147{
1148	struct ext4_sb_info *sbi = EXT4_SB(sb);
1149	int i;
1150
1151	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1152		if (grp->bb_counters[i] > 0)
1153			break;
1154	/* No need to move between order lists? */
1155	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1156	    i == grp->bb_largest_free_order) {
1157		grp->bb_largest_free_order = i;
1158		return;
1159	}
1160
1161	if (grp->bb_largest_free_order >= 0) {
1162		write_lock(&sbi->s_mb_largest_free_orders_locks[
1163					      grp->bb_largest_free_order]);
1164		list_del_init(&grp->bb_largest_free_order_node);
1165		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1166					      grp->bb_largest_free_order]);
1167	}
1168	grp->bb_largest_free_order = i;
1169	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1170		write_lock(&sbi->s_mb_largest_free_orders_locks[
1171					      grp->bb_largest_free_order]);
1172		list_add_tail(&grp->bb_largest_free_order_node,
1173		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1174		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1175					      grp->bb_largest_free_order]);
1176	}
1177}
1178
1179static noinline_for_stack
1180void ext4_mb_generate_buddy(struct super_block *sb,
1181			    void *buddy, void *bitmap, ext4_group_t group,
1182			    struct ext4_group_info *grp)
1183{
1184	struct ext4_sb_info *sbi = EXT4_SB(sb);
1185	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1186	ext4_grpblk_t i = 0;
1187	ext4_grpblk_t first;
1188	ext4_grpblk_t len;
1189	unsigned free = 0;
1190	unsigned fragments = 0;
1191	unsigned long long period = get_cycles();
1192
1193	/* initialize buddy from bitmap which is aggregation
1194	 * of on-disk bitmap and preallocations */
1195	i = mb_find_next_zero_bit(bitmap, max, 0);
1196	grp->bb_first_free = i;
1197	while (i < max) {
1198		fragments++;
1199		first = i;
1200		i = mb_find_next_bit(bitmap, max, i);
1201		len = i - first;
1202		free += len;
1203		if (len > 1)
1204			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1205		else
1206			grp->bb_counters[0]++;
1207		if (i < max)
1208			i = mb_find_next_zero_bit(bitmap, max, i);
1209	}
1210	grp->bb_fragments = fragments;
1211
1212	if (free != grp->bb_free) {
1213		ext4_grp_locked_error(sb, group, 0, 0,
1214				      "block bitmap and bg descriptor "
1215				      "inconsistent: %u vs %u free clusters",
1216				      free, grp->bb_free);
1217		/*
1218		 * If we intend to continue, we consider group descriptor
1219		 * corrupt and update bb_free using bitmap value
1220		 */
1221		grp->bb_free = free;
1222		ext4_mark_group_bitmap_corrupted(sb, group,
1223					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1224	}
1225	mb_set_largest_free_order(sb, grp);
1226	mb_update_avg_fragment_size(sb, grp);
1227
1228	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1229
1230	period = get_cycles() - period;
1231	atomic_inc(&sbi->s_mb_buddies_generated);
1232	atomic64_add(period, &sbi->s_mb_generation_time);
1233}
1234
1235static void mb_regenerate_buddy(struct ext4_buddy *e4b)
1236{
1237	int count;
1238	int order = 1;
1239	void *buddy;
1240
1241	while ((buddy = mb_find_buddy(e4b, order++, &count)))
1242		mb_set_bits(buddy, 0, count);
1243
1244	e4b->bd_info->bb_fragments = 0;
1245	memset(e4b->bd_info->bb_counters, 0,
1246		sizeof(*e4b->bd_info->bb_counters) *
1247		(e4b->bd_sb->s_blocksize_bits + 2));
1248
1249	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
1250		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
1251}
1252
1253/* The buddy information is attached the buddy cache inode
1254 * for convenience. The information regarding each group
1255 * is loaded via ext4_mb_load_buddy. The information involve
1256 * block bitmap and buddy information. The information are
1257 * stored in the inode as
1258 *
1259 * {                        page                        }
1260 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1261 *
1262 *
1263 * one block each for bitmap and buddy information.
1264 * So for each group we take up 2 blocks. A page can
1265 * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
1266 * So it can have information regarding groups_per_page which
1267 * is blocks_per_page/2
1268 *
1269 * Locking note:  This routine takes the block group lock of all groups
1270 * for this page; do not hold this lock when calling this routine!
1271 */
1272
1273static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1274{
1275	ext4_group_t ngroups;
1276	unsigned int blocksize;
1277	int blocks_per_page;
1278	int groups_per_page;
1279	int err = 0;
1280	int i;
1281	ext4_group_t first_group, group;
1282	int first_block;
1283	struct super_block *sb;
1284	struct buffer_head *bhs;
1285	struct buffer_head **bh = NULL;
1286	struct inode *inode;
1287	char *data;
1288	char *bitmap;
1289	struct ext4_group_info *grinfo;
1290
1291	inode = page->mapping->host;
1292	sb = inode->i_sb;
1293	ngroups = ext4_get_groups_count(sb);
1294	blocksize = i_blocksize(inode);
1295	blocks_per_page = PAGE_SIZE / blocksize;
1296
1297	mb_debug(sb, "init page %lu\n", page->index);
1298
1299	groups_per_page = blocks_per_page >> 1;
1300	if (groups_per_page == 0)
1301		groups_per_page = 1;
1302
1303	/* allocate buffer_heads to read bitmaps */
1304	if (groups_per_page > 1) {
1305		i = sizeof(struct buffer_head *) * groups_per_page;
1306		bh = kzalloc(i, gfp);
1307		if (bh == NULL)
1308			return -ENOMEM;
1309	} else
1310		bh = &bhs;
1311
1312	first_group = page->index * blocks_per_page / 2;
1313
1314	/* read all groups the page covers into the cache */
1315	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1316		if (group >= ngroups)
1317			break;
1318
1319		grinfo = ext4_get_group_info(sb, group);
1320		if (!grinfo)
1321			continue;
1322		/*
1323		 * If page is uptodate then we came here after online resize
1324		 * which added some new uninitialized group info structs, so
1325		 * we must skip all initialized uptodate buddies on the page,
1326		 * which may be currently in use by an allocating task.
1327		 */
1328		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
 
1329			bh[i] = NULL;
1330			continue;
1331		}
1332		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1333		if (IS_ERR(bh[i])) {
1334			err = PTR_ERR(bh[i]);
1335			bh[i] = NULL;
1336			goto out;
1337		}
1338		mb_debug(sb, "read bitmap for group %u\n", group);
1339	}
1340
1341	/* wait for I/O completion */
1342	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1343		int err2;
1344
1345		if (!bh[i])
1346			continue;
1347		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1348		if (!err)
1349			err = err2;
1350	}
1351
1352	first_block = page->index * blocks_per_page;
1353	for (i = 0; i < blocks_per_page; i++) {
1354		group = (first_block + i) >> 1;
1355		if (group >= ngroups)
1356			break;
1357
1358		if (!bh[group - first_group])
1359			/* skip initialized uptodate buddy */
1360			continue;
1361
1362		if (!buffer_verified(bh[group - first_group]))
1363			/* Skip faulty bitmaps */
1364			continue;
1365		err = 0;
1366
1367		/*
1368		 * data carry information regarding this
1369		 * particular group in the format specified
1370		 * above
1371		 *
1372		 */
1373		data = page_address(page) + (i * blocksize);
1374		bitmap = bh[group - first_group]->b_data;
1375
1376		/*
1377		 * We place the buddy block and bitmap block
1378		 * close together
1379		 */
1380		grinfo = ext4_get_group_info(sb, group);
1381		if (!grinfo) {
1382			err = -EFSCORRUPTED;
1383		        goto out;
1384		}
1385		if ((first_block + i) & 1) {
1386			/* this is block of buddy */
1387			BUG_ON(incore == NULL);
1388			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1389				group, page->index, i * blocksize);
1390			trace_ext4_mb_buddy_bitmap_load(sb, group);
1391			grinfo->bb_fragments = 0;
1392			memset(grinfo->bb_counters, 0,
1393			       sizeof(*grinfo->bb_counters) *
1394			       (MB_NUM_ORDERS(sb)));
1395			/*
1396			 * incore got set to the group block bitmap below
1397			 */
1398			ext4_lock_group(sb, group);
1399			/* init the buddy */
1400			memset(data, 0xff, blocksize);
1401			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1402			ext4_unlock_group(sb, group);
1403			incore = NULL;
1404		} else {
1405			/* this is block of bitmap */
1406			BUG_ON(incore != NULL);
1407			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1408				group, page->index, i * blocksize);
1409			trace_ext4_mb_bitmap_load(sb, group);
1410
1411			/* see comments in ext4_mb_put_pa() */
1412			ext4_lock_group(sb, group);
1413			memcpy(data, bitmap, blocksize);
1414
1415			/* mark all preallocated blks used in in-core bitmap */
1416			ext4_mb_generate_from_pa(sb, data, group);
1417			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
1418			ext4_unlock_group(sb, group);
1419
1420			/* set incore so that the buddy information can be
1421			 * generated using this
1422			 */
1423			incore = data;
1424		}
1425	}
1426	SetPageUptodate(page);
1427
1428out:
1429	if (bh) {
1430		for (i = 0; i < groups_per_page; i++)
1431			brelse(bh[i]);
1432		if (bh != &bhs)
1433			kfree(bh);
1434	}
1435	return err;
1436}
1437
1438/*
1439 * Lock the buddy and bitmap pages. This make sure other parallel init_group
1440 * on the same buddy page doesn't happen whild holding the buddy page lock.
1441 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1442 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1443 */
1444static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1445		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1446{
1447	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1448	int block, pnum, poff;
1449	int blocks_per_page;
1450	struct page *page;
1451
1452	e4b->bd_buddy_page = NULL;
1453	e4b->bd_bitmap_page = NULL;
1454
1455	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1456	/*
1457	 * the buddy cache inode stores the block bitmap
1458	 * and buddy information in consecutive blocks.
1459	 * So for each group we need two blocks.
1460	 */
1461	block = group * 2;
1462	pnum = block / blocks_per_page;
1463	poff = block % blocks_per_page;
1464	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1465	if (!page)
1466		return -ENOMEM;
1467	BUG_ON(page->mapping != inode->i_mapping);
1468	e4b->bd_bitmap_page = page;
1469	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
 
1470
1471	if (blocks_per_page >= 2) {
1472		/* buddy and bitmap are on the same page */
1473		return 0;
1474	}
1475
1476	/* blocks_per_page == 1, hence we need another page for the buddy */
1477	page = find_or_create_page(inode->i_mapping, block + 1, gfp);
1478	if (!page)
1479		return -ENOMEM;
1480	BUG_ON(page->mapping != inode->i_mapping);
1481	e4b->bd_buddy_page = page;
 
1482	return 0;
1483}
1484
1485static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1486{
1487	if (e4b->bd_bitmap_page) {
1488		unlock_page(e4b->bd_bitmap_page);
1489		put_page(e4b->bd_bitmap_page);
1490	}
1491	if (e4b->bd_buddy_page) {
1492		unlock_page(e4b->bd_buddy_page);
1493		put_page(e4b->bd_buddy_page);
1494	}
1495}
1496
1497/*
1498 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1499 * block group lock of all groups for this page; do not hold the BG lock when
1500 * calling this routine!
1501 */
1502static noinline_for_stack
1503int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1504{
1505
1506	struct ext4_group_info *this_grp;
1507	struct ext4_buddy e4b;
1508	struct page *page;
1509	int ret = 0;
1510
1511	might_sleep();
1512	mb_debug(sb, "init group %u\n", group);
1513	this_grp = ext4_get_group_info(sb, group);
1514	if (!this_grp)
1515		return -EFSCORRUPTED;
1516
1517	/*
1518	 * This ensures that we don't reinit the buddy cache
1519	 * page which map to the group from which we are already
1520	 * allocating. If we are looking at the buddy cache we would
1521	 * have taken a reference using ext4_mb_load_buddy and that
1522	 * would have pinned buddy page to page cache.
1523	 * The call to ext4_mb_get_buddy_page_lock will mark the
1524	 * page accessed.
1525	 */
1526	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1527	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1528		/*
1529		 * somebody initialized the group
1530		 * return without doing anything
1531		 */
1532		goto err;
1533	}
1534
1535	page = e4b.bd_bitmap_page;
1536	ret = ext4_mb_init_cache(page, NULL, gfp);
1537	if (ret)
1538		goto err;
1539	if (!PageUptodate(page)) {
1540		ret = -EIO;
1541		goto err;
1542	}
1543
1544	if (e4b.bd_buddy_page == NULL) {
1545		/*
1546		 * If both the bitmap and buddy are in
1547		 * the same page we don't need to force
1548		 * init the buddy
1549		 */
1550		ret = 0;
1551		goto err;
1552	}
1553	/* init buddy cache */
1554	page = e4b.bd_buddy_page;
1555	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1556	if (ret)
1557		goto err;
1558	if (!PageUptodate(page)) {
1559		ret = -EIO;
1560		goto err;
1561	}
1562err:
1563	ext4_mb_put_buddy_page_lock(&e4b);
1564	return ret;
1565}
1566
1567/*
1568 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1569 * block group lock of all groups for this page; do not hold the BG lock when
1570 * calling this routine!
1571 */
1572static noinline_for_stack int
1573ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1574		       struct ext4_buddy *e4b, gfp_t gfp)
1575{
1576	int blocks_per_page;
1577	int block;
1578	int pnum;
1579	int poff;
1580	struct page *page;
1581	int ret;
1582	struct ext4_group_info *grp;
1583	struct ext4_sb_info *sbi = EXT4_SB(sb);
1584	struct inode *inode = sbi->s_buddy_cache;
1585
1586	might_sleep();
1587	mb_debug(sb, "load group %u\n", group);
1588
1589	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1590	grp = ext4_get_group_info(sb, group);
1591	if (!grp)
1592		return -EFSCORRUPTED;
1593
1594	e4b->bd_blkbits = sb->s_blocksize_bits;
1595	e4b->bd_info = grp;
1596	e4b->bd_sb = sb;
1597	e4b->bd_group = group;
1598	e4b->bd_buddy_page = NULL;
1599	e4b->bd_bitmap_page = NULL;
1600
1601	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1602		/*
1603		 * we need full data about the group
1604		 * to make a good selection
1605		 */
1606		ret = ext4_mb_init_group(sb, group, gfp);
1607		if (ret)
1608			return ret;
1609	}
1610
1611	/*
1612	 * the buddy cache inode stores the block bitmap
1613	 * and buddy information in consecutive blocks.
1614	 * So for each group we need two blocks.
1615	 */
1616	block = group * 2;
1617	pnum = block / blocks_per_page;
1618	poff = block % blocks_per_page;
1619
1620	/* we could use find_or_create_page(), but it locks page
1621	 * what we'd like to avoid in fast path ... */
1622	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1623	if (page == NULL || !PageUptodate(page)) {
1624		if (page)
1625			/*
1626			 * drop the page reference and try
1627			 * to get the page with lock. If we
1628			 * are not uptodate that implies
1629			 * somebody just created the page but
1630			 * is yet to initialize the same. So
1631			 * wait for it to initialize.
1632			 */
1633			put_page(page);
1634		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1635		if (page) {
1636			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1637	"ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
 
1638				/* should never happen */
1639				unlock_page(page);
1640				ret = -EINVAL;
1641				goto err;
1642			}
1643			if (!PageUptodate(page)) {
1644				ret = ext4_mb_init_cache(page, NULL, gfp);
1645				if (ret) {
1646					unlock_page(page);
1647					goto err;
1648				}
1649				mb_cmp_bitmaps(e4b, page_address(page) +
1650					       (poff * sb->s_blocksize));
1651			}
1652			unlock_page(page);
1653		}
1654	}
1655	if (page == NULL) {
1656		ret = -ENOMEM;
1657		goto err;
1658	}
1659	if (!PageUptodate(page)) {
1660		ret = -EIO;
1661		goto err;
1662	}
1663
1664	/* Pages marked accessed already */
1665	e4b->bd_bitmap_page = page;
1666	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1667
1668	block++;
1669	pnum = block / blocks_per_page;
1670	poff = block % blocks_per_page;
1671
1672	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1673	if (page == NULL || !PageUptodate(page)) {
1674		if (page)
1675			put_page(page);
1676		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1677		if (page) {
1678			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1679	"ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
 
1680				/* should never happen */
1681				unlock_page(page);
1682				ret = -EINVAL;
1683				goto err;
1684			}
1685			if (!PageUptodate(page)) {
1686				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1687							 gfp);
1688				if (ret) {
1689					unlock_page(page);
1690					goto err;
1691				}
1692			}
1693			unlock_page(page);
1694		}
1695	}
1696	if (page == NULL) {
1697		ret = -ENOMEM;
1698		goto err;
1699	}
1700	if (!PageUptodate(page)) {
1701		ret = -EIO;
1702		goto err;
1703	}
1704
1705	/* Pages marked accessed already */
1706	e4b->bd_buddy_page = page;
1707	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1708
1709	return 0;
1710
1711err:
1712	if (page)
1713		put_page(page);
1714	if (e4b->bd_bitmap_page)
1715		put_page(e4b->bd_bitmap_page);
1716
1717	e4b->bd_buddy = NULL;
1718	e4b->bd_bitmap = NULL;
1719	return ret;
1720}
1721
1722static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1723			      struct ext4_buddy *e4b)
1724{
1725	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1726}
1727
1728static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1729{
1730	if (e4b->bd_bitmap_page)
1731		put_page(e4b->bd_bitmap_page);
1732	if (e4b->bd_buddy_page)
1733		put_page(e4b->bd_buddy_page);
1734}
1735
1736
1737static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1738{
1739	int order = 1, max;
1740	void *bb;
1741
1742	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1743	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1744
1745	while (order <= e4b->bd_blkbits + 1) {
1746		bb = mb_find_buddy(e4b, order, &max);
1747		if (!mb_test_bit(block >> order, bb)) {
1748			/* this block is part of buddy of order 'order' */
1749			return order;
1750		}
1751		order++;
1752	}
1753	return 0;
1754}
1755
1756static void mb_clear_bits(void *bm, int cur, int len)
1757{
1758	__u32 *addr;
1759
1760	len = cur + len;
1761	while (cur < len) {
1762		if ((cur & 31) == 0 && (len - cur) >= 32) {
1763			/* fast path: clear whole word at once */
1764			addr = bm + (cur >> 3);
1765			*addr = 0;
1766			cur += 32;
1767			continue;
1768		}
1769		mb_clear_bit(cur, bm);
1770		cur++;
1771	}
1772}
1773
1774/* clear bits in given range
1775 * will return first found zero bit if any, -1 otherwise
1776 */
1777static int mb_test_and_clear_bits(void *bm, int cur, int len)
1778{
1779	__u32 *addr;
1780	int zero_bit = -1;
1781
1782	len = cur + len;
1783	while (cur < len) {
1784		if ((cur & 31) == 0 && (len - cur) >= 32) {
1785			/* fast path: clear whole word at once */
1786			addr = bm + (cur >> 3);
1787			if (*addr != (__u32)(-1) && zero_bit == -1)
1788				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1789			*addr = 0;
1790			cur += 32;
1791			continue;
1792		}
1793		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1794			zero_bit = cur;
1795		cur++;
1796	}
1797
1798	return zero_bit;
1799}
1800
1801void mb_set_bits(void *bm, int cur, int len)
1802{
1803	__u32 *addr;
1804
1805	len = cur + len;
1806	while (cur < len) {
1807		if ((cur & 31) == 0 && (len - cur) >= 32) {
1808			/* fast path: set whole word at once */
1809			addr = bm + (cur >> 3);
1810			*addr = 0xffffffff;
1811			cur += 32;
1812			continue;
1813		}
1814		mb_set_bit(cur, bm);
1815		cur++;
1816	}
1817}
1818
1819static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1820{
1821	if (mb_test_bit(*bit + side, bitmap)) {
1822		mb_clear_bit(*bit, bitmap);
1823		(*bit) -= side;
1824		return 1;
1825	}
1826	else {
1827		(*bit) += side;
1828		mb_set_bit(*bit, bitmap);
1829		return -1;
1830	}
1831}
1832
1833static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1834{
1835	int max;
1836	int order = 1;
1837	void *buddy = mb_find_buddy(e4b, order, &max);
1838
1839	while (buddy) {
1840		void *buddy2;
1841
1842		/* Bits in range [first; last] are known to be set since
1843		 * corresponding blocks were allocated. Bits in range
1844		 * (first; last) will stay set because they form buddies on
1845		 * upper layer. We just deal with borders if they don't
1846		 * align with upper layer and then go up.
1847		 * Releasing entire group is all about clearing
1848		 * single bit of highest order buddy.
1849		 */
1850
1851		/* Example:
1852		 * ---------------------------------
1853		 * |   1   |   1   |   1   |   1   |
1854		 * ---------------------------------
1855		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1856		 * ---------------------------------
1857		 *   0   1   2   3   4   5   6   7
1858		 *      \_____________________/
1859		 *
1860		 * Neither [1] nor [6] is aligned to above layer.
1861		 * Left neighbour [0] is free, so mark it busy,
1862		 * decrease bb_counters and extend range to
1863		 * [0; 6]
1864		 * Right neighbour [7] is busy. It can't be coaleasced with [6], so
1865		 * mark [6] free, increase bb_counters and shrink range to
1866		 * [0; 5].
1867		 * Then shift range to [0; 2], go up and do the same.
1868		 */
1869
1870
1871		if (first & 1)
1872			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1873		if (!(last & 1))
1874			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1875		if (first > last)
1876			break;
1877		order++;
1878
1879		buddy2 = mb_find_buddy(e4b, order, &max);
1880		if (!buddy2) {
1881			mb_clear_bits(buddy, first, last - first + 1);
1882			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1883			break;
1884		}
1885		first >>= 1;
1886		last >>= 1;
1887		buddy = buddy2;
1888	}
1889}
1890
1891static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1892			   int first, int count)
1893{
1894	int left_is_free = 0;
1895	int right_is_free = 0;
1896	int block;
1897	int last = first + count - 1;
1898	struct super_block *sb = e4b->bd_sb;
1899
1900	if (WARN_ON(count == 0))
1901		return;
1902	BUG_ON(last >= (sb->s_blocksize << 3));
1903	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1904	/* Don't bother if the block group is corrupt. */
1905	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1906		return;
1907
1908	mb_check_buddy(e4b);
1909	mb_free_blocks_double(inode, e4b, first, count);
1910
1911	/* access memory sequentially: check left neighbour,
1912	 * clear range and then check right neighbour
1913	 */
1914	if (first != 0)
1915		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1916	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1917	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1918		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1919
1920	if (unlikely(block != -1)) {
1921		struct ext4_sb_info *sbi = EXT4_SB(sb);
1922		ext4_fsblk_t blocknr;
1923
1924		/*
1925		 * Fastcommit replay can free already freed blocks which
1926		 * corrupts allocation info. Regenerate it.
1927		 */
1928		if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1929			mb_regenerate_buddy(e4b);
1930			goto check;
1931		}
1932
1933		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1934		blocknr += EXT4_C2B(sbi, block);
1935		ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1936				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1937		ext4_grp_locked_error(sb, e4b->bd_group,
1938				      inode ? inode->i_ino : 0, blocknr,
1939				      "freeing already freed block (bit %u); block bitmap corrupt.",
1940				      block);
1941		return;
1942	}
1943
1944	this_cpu_inc(discard_pa_seq);
1945	e4b->bd_info->bb_free += count;
1946	if (first < e4b->bd_info->bb_first_free)
1947		e4b->bd_info->bb_first_free = first;
1948
1949	/* let's maintain fragments counter */
1950	if (left_is_free && right_is_free)
1951		e4b->bd_info->bb_fragments--;
1952	else if (!left_is_free && !right_is_free)
1953		e4b->bd_info->bb_fragments++;
1954
1955	/* buddy[0] == bd_bitmap is a special case, so handle
1956	 * it right away and let mb_buddy_mark_free stay free of
1957	 * zero order checks.
1958	 * Check if neighbours are to be coaleasced,
1959	 * adjust bitmap bb_counters and borders appropriately.
1960	 */
1961	if (first & 1) {
1962		first += !left_is_free;
1963		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1964	}
1965	if (!(last & 1)) {
1966		last -= !right_is_free;
1967		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1968	}
1969
1970	if (first <= last)
1971		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1972
1973	mb_set_largest_free_order(sb, e4b->bd_info);
1974	mb_update_avg_fragment_size(sb, e4b->bd_info);
1975check:
1976	mb_check_buddy(e4b);
1977}
1978
1979static int mb_find_extent(struct ext4_buddy *e4b, int block,
1980				int needed, struct ext4_free_extent *ex)
1981{
1982	int max, order, next;
1983	void *buddy;
1984
1985	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1986	BUG_ON(ex == NULL);
1987
1988	buddy = mb_find_buddy(e4b, 0, &max);
1989	BUG_ON(buddy == NULL);
1990	BUG_ON(block >= max);
1991	if (mb_test_bit(block, buddy)) {
1992		ex->fe_len = 0;
1993		ex->fe_start = 0;
1994		ex->fe_group = 0;
1995		return 0;
1996	}
1997
1998	/* find actual order */
1999	order = mb_find_order_for_block(e4b, block);
2000
2001	ex->fe_len = (1 << order) - (block & ((1 << order) - 1));
2002	ex->fe_start = block;
2003	ex->fe_group = e4b->bd_group;
2004
2005	block = block >> order;
2006
2007	while (needed > ex->fe_len &&
2008	       mb_find_buddy(e4b, order, &max)) {
2009
2010		if (block + 1 >= max)
2011			break;
2012
2013		next = (block + 1) * (1 << order);
2014		if (mb_test_bit(next, e4b->bd_bitmap))
2015			break;
2016
2017		order = mb_find_order_for_block(e4b, next);
2018
2019		block = next >> order;
2020		ex->fe_len += 1 << order;
2021	}
2022
2023	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2024		/* Should never happen! (but apparently sometimes does?!?) */
2025		WARN_ON(1);
2026		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2027			"corruption or bug in mb_find_extent "
2028			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2029			block, order, needed, ex->fe_group, ex->fe_start,
2030			ex->fe_len, ex->fe_logical);
2031		ex->fe_len = 0;
2032		ex->fe_start = 0;
2033		ex->fe_group = 0;
2034	}
2035	return ex->fe_len;
2036}
2037
2038static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2039{
2040	int ord;
2041	int mlen = 0;
2042	int max = 0;
2043	int cur;
2044	int start = ex->fe_start;
2045	int len = ex->fe_len;
2046	unsigned ret = 0;
2047	int len0 = len;
2048	void *buddy;
2049	bool split = false;
2050
2051	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2052	BUG_ON(e4b->bd_group != ex->fe_group);
2053	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2054	mb_check_buddy(e4b);
2055	mb_mark_used_double(e4b, start, len);
2056
2057	this_cpu_inc(discard_pa_seq);
2058	e4b->bd_info->bb_free -= len;
2059	if (e4b->bd_info->bb_first_free == start)
2060		e4b->bd_info->bb_first_free += len;
2061
2062	/* let's maintain fragments counter */
2063	if (start != 0)
2064		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2065	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2066		max = !mb_test_bit(start + len, e4b->bd_bitmap);
2067	if (mlen && max)
2068		e4b->bd_info->bb_fragments++;
2069	else if (!mlen && !max)
2070		e4b->bd_info->bb_fragments--;
2071
2072	/* let's maintain buddy itself */
2073	while (len) {
2074		if (!split)
2075			ord = mb_find_order_for_block(e4b, start);
2076
2077		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2078			/* the whole chunk may be allocated at once! */
2079			mlen = 1 << ord;
2080			if (!split)
2081				buddy = mb_find_buddy(e4b, ord, &max);
2082			else
2083				split = false;
2084			BUG_ON((start >> ord) >= max);
2085			mb_set_bit(start >> ord, buddy);
2086			e4b->bd_info->bb_counters[ord]--;
2087			start += mlen;
2088			len -= mlen;
2089			BUG_ON(len < 0);
2090			continue;
2091		}
2092
2093		/* store for history */
2094		if (ret == 0)
2095			ret = len | (ord << 16);
2096
2097		/* we have to split large buddy */
2098		BUG_ON(ord <= 0);
2099		buddy = mb_find_buddy(e4b, ord, &max);
2100		mb_set_bit(start >> ord, buddy);
2101		e4b->bd_info->bb_counters[ord]--;
2102
2103		ord--;
2104		cur = (start >> ord) & ~1U;
2105		buddy = mb_find_buddy(e4b, ord, &max);
2106		mb_clear_bit(cur, buddy);
2107		mb_clear_bit(cur + 1, buddy);
2108		e4b->bd_info->bb_counters[ord]++;
2109		e4b->bd_info->bb_counters[ord]++;
2110		split = true;
 
 
 
 
 
 
 
 
 
 
2111	}
2112	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2113
2114	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2115	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2116	mb_check_buddy(e4b);
2117
2118	return ret;
2119}
2120
2121/*
2122 * Must be called under group lock!
2123 */
2124static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2125					struct ext4_buddy *e4b)
2126{
2127	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2128	int ret;
2129
2130	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2131	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2132
2133	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2134	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2135	ret = mb_mark_used(e4b, &ac->ac_b_ex);
2136
2137	/* preallocation can change ac_b_ex, thus we store actually
2138	 * allocated blocks for history */
2139	ac->ac_f_ex = ac->ac_b_ex;
2140
2141	ac->ac_status = AC_STATUS_FOUND;
2142	ac->ac_tail = ret & 0xffff;
2143	ac->ac_buddy = ret >> 16;
2144
2145	/*
2146	 * take the page reference. We want the page to be pinned
2147	 * so that we don't get a ext4_mb_init_cache_call for this
2148	 * group until we update the bitmap. That would mean we
2149	 * double allocate blocks. The reference is dropped
2150	 * in ext4_mb_release_context
2151	 */
2152	ac->ac_bitmap_page = e4b->bd_bitmap_page;
2153	get_page(ac->ac_bitmap_page);
2154	ac->ac_buddy_page = e4b->bd_buddy_page;
2155	get_page(ac->ac_buddy_page);
2156	/* store last allocated for subsequent stream allocation */
2157	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2158		spin_lock(&sbi->s_md_lock);
2159		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2160		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2161		spin_unlock(&sbi->s_md_lock);
2162	}
2163	/*
2164	 * As we've just preallocated more space than
2165	 * user requested originally, we store allocated
2166	 * space in a special descriptor.
2167	 */
2168	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2169		ext4_mb_new_preallocation(ac);
2170
2171}
2172
2173static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2174					struct ext4_buddy *e4b,
2175					int finish_group)
2176{
2177	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2178	struct ext4_free_extent *bex = &ac->ac_b_ex;
2179	struct ext4_free_extent *gex = &ac->ac_g_ex;
2180
2181	if (ac->ac_status == AC_STATUS_FOUND)
2182		return;
2183	/*
2184	 * We don't want to scan for a whole year
2185	 */
2186	if (ac->ac_found > sbi->s_mb_max_to_scan &&
2187			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2188		ac->ac_status = AC_STATUS_BREAK;
2189		return;
2190	}
2191
2192	/*
2193	 * Haven't found good chunk so far, let's continue
2194	 */
2195	if (bex->fe_len < gex->fe_len)
2196		return;
2197
2198	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2199		ext4_mb_use_best_found(ac, e4b);
2200}
2201
2202/*
2203 * The routine checks whether found extent is good enough. If it is,
2204 * then the extent gets marked used and flag is set to the context
2205 * to stop scanning. Otherwise, the extent is compared with the
2206 * previous found extent and if new one is better, then it's stored
2207 * in the context. Later, the best found extent will be used, if
2208 * mballoc can't find good enough extent.
2209 *
2210 * The algorithm used is roughly as follows:
2211 *
2212 * * If free extent found is exactly as big as goal, then
2213 *   stop the scan and use it immediately
2214 *
2215 * * If free extent found is smaller than goal, then keep retrying
2216 *   upto a max of sbi->s_mb_max_to_scan times (default 200). After
2217 *   that stop scanning and use whatever we have.
2218 *
2219 * * If free extent found is bigger than goal, then keep retrying
2220 *   upto a max of sbi->s_mb_min_to_scan times (default 10) before
2221 *   stopping the scan and using the extent.
2222 *
2223 *
2224 * FIXME: real allocation policy is to be designed yet!
2225 */
2226static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2227					struct ext4_free_extent *ex,
2228					struct ext4_buddy *e4b)
2229{
2230	struct ext4_free_extent *bex = &ac->ac_b_ex;
2231	struct ext4_free_extent *gex = &ac->ac_g_ex;
2232
2233	BUG_ON(ex->fe_len <= 0);
2234	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2235	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2236	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2237
2238	ac->ac_found++;
2239	ac->ac_cX_found[ac->ac_criteria]++;
2240
2241	/*
2242	 * The special case - take what you catch first
2243	 */
2244	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2245		*bex = *ex;
2246		ext4_mb_use_best_found(ac, e4b);
2247		return;
2248	}
2249
2250	/*
2251	 * Let's check whether the chuck is good enough
2252	 */
2253	if (ex->fe_len == gex->fe_len) {
2254		*bex = *ex;
2255		ext4_mb_use_best_found(ac, e4b);
2256		return;
2257	}
2258
2259	/*
2260	 * If this is first found extent, just store it in the context
2261	 */
2262	if (bex->fe_len == 0) {
2263		*bex = *ex;
2264		return;
2265	}
2266
2267	/*
2268	 * If new found extent is better, store it in the context
2269	 */
2270	if (bex->fe_len < gex->fe_len) {
2271		/* if the request isn't satisfied, any found extent
2272		 * larger than previous best one is better */
2273		if (ex->fe_len > bex->fe_len)
2274			*bex = *ex;
2275	} else if (ex->fe_len > gex->fe_len) {
2276		/* if the request is satisfied, then we try to find
2277		 * an extent that still satisfy the request, but is
2278		 * smaller than previous one */
2279		if (ex->fe_len < bex->fe_len)
2280			*bex = *ex;
2281	}
2282
2283	ext4_mb_check_limits(ac, e4b, 0);
2284}
2285
2286static noinline_for_stack
2287void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2288					struct ext4_buddy *e4b)
2289{
2290	struct ext4_free_extent ex = ac->ac_b_ex;
2291	ext4_group_t group = ex.fe_group;
2292	int max;
2293	int err;
2294
2295	BUG_ON(ex.fe_len <= 0);
2296	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2297	if (err)
2298		return;
2299
2300	ext4_lock_group(ac->ac_sb, group);
2301	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2302		goto out;
2303
2304	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2305
2306	if (max > 0) {
2307		ac->ac_b_ex = ex;
2308		ext4_mb_use_best_found(ac, e4b);
2309	}
2310
2311out:
2312	ext4_unlock_group(ac->ac_sb, group);
2313	ext4_mb_unload_buddy(e4b);
2314}
2315
2316static noinline_for_stack
2317int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2318				struct ext4_buddy *e4b)
2319{
2320	ext4_group_t group = ac->ac_g_ex.fe_group;
2321	int max;
2322	int err;
2323	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2324	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2325	struct ext4_free_extent ex;
2326
2327	if (!grp)
2328		return -EFSCORRUPTED;
2329	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2330		return 0;
2331	if (grp->bb_free == 0)
2332		return 0;
2333
2334	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2335	if (err)
2336		return err;
2337
2338	ext4_lock_group(ac->ac_sb, group);
2339	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2340		goto out;
2341
2342	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2343			     ac->ac_g_ex.fe_len, &ex);
2344	ex.fe_logical = 0xDEADFA11; /* debug value */
2345
2346	if (max >= ac->ac_g_ex.fe_len &&
2347	    ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
2348		ext4_fsblk_t start;
2349
2350		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2351		/* use do_div to get remainder (would be 64-bit modulo) */
2352		if (do_div(start, sbi->s_stripe) == 0) {
2353			ac->ac_found++;
2354			ac->ac_b_ex = ex;
2355			ext4_mb_use_best_found(ac, e4b);
2356		}
2357	} else if (max >= ac->ac_g_ex.fe_len) {
2358		BUG_ON(ex.fe_len <= 0);
2359		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2360		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2361		ac->ac_found++;
2362		ac->ac_b_ex = ex;
2363		ext4_mb_use_best_found(ac, e4b);
2364	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2365		/* Sometimes, caller may want to merge even small
2366		 * number of blocks to an existing extent */
2367		BUG_ON(ex.fe_len <= 0);
2368		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2369		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2370		ac->ac_found++;
2371		ac->ac_b_ex = ex;
2372		ext4_mb_use_best_found(ac, e4b);
2373	}
2374out:
2375	ext4_unlock_group(ac->ac_sb, group);
2376	ext4_mb_unload_buddy(e4b);
2377
2378	return 0;
2379}
2380
2381/*
2382 * The routine scans buddy structures (not bitmap!) from given order
2383 * to max order and tries to find big enough chunk to satisfy the req
2384 */
2385static noinline_for_stack
2386void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2387					struct ext4_buddy *e4b)
2388{
2389	struct super_block *sb = ac->ac_sb;
2390	struct ext4_group_info *grp = e4b->bd_info;
2391	void *buddy;
2392	int i;
2393	int k;
2394	int max;
2395
2396	BUG_ON(ac->ac_2order <= 0);
2397	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2398		if (grp->bb_counters[i] == 0)
2399			continue;
2400
2401		buddy = mb_find_buddy(e4b, i, &max);
2402		if (WARN_RATELIMIT(buddy == NULL,
2403			 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2404			continue;
2405
2406		k = mb_find_next_zero_bit(buddy, max, 0);
2407		if (k >= max) {
2408			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2409					e4b->bd_group,
2410					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2411			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2412				"%d free clusters of order %d. But found 0",
2413				grp->bb_counters[i], i);
2414			break;
2415		}
2416		ac->ac_found++;
2417		ac->ac_cX_found[ac->ac_criteria]++;
2418
2419		ac->ac_b_ex.fe_len = 1 << i;
2420		ac->ac_b_ex.fe_start = k << i;
2421		ac->ac_b_ex.fe_group = e4b->bd_group;
2422
2423		ext4_mb_use_best_found(ac, e4b);
2424
2425		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2426
2427		if (EXT4_SB(sb)->s_mb_stats)
2428			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2429
2430		break;
2431	}
2432}
2433
2434/*
2435 * The routine scans the group and measures all found extents.
2436 * In order to optimize scanning, caller must pass number of
2437 * free blocks in the group, so the routine can know upper limit.
2438 */
2439static noinline_for_stack
2440void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2441					struct ext4_buddy *e4b)
2442{
2443	struct super_block *sb = ac->ac_sb;
2444	void *bitmap = e4b->bd_bitmap;
2445	struct ext4_free_extent ex;
2446	int i, j, freelen;
2447	int free;
2448
2449	free = e4b->bd_info->bb_free;
2450	if (WARN_ON(free <= 0))
2451		return;
2452
2453	i = e4b->bd_info->bb_first_free;
2454
2455	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2456		i = mb_find_next_zero_bit(bitmap,
2457						EXT4_CLUSTERS_PER_GROUP(sb), i);
2458		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2459			/*
2460			 * IF we have corrupt bitmap, we won't find any
2461			 * free blocks even though group info says we
2462			 * have free blocks
2463			 */
2464			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2465					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2466			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2467					"%d free clusters as per "
2468					"group info. But bitmap says 0",
2469					free);
2470			break;
2471		}
2472
2473		if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2474			/*
2475			 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2476			 * sure that this group will have a large enough
2477			 * continuous free extent, so skip over the smaller free
2478			 * extents
2479			 */
2480			j = mb_find_next_bit(bitmap,
2481						EXT4_CLUSTERS_PER_GROUP(sb), i);
2482			freelen = j - i;
2483
2484			if (freelen < ac->ac_g_ex.fe_len) {
2485				i = j;
2486				free -= freelen;
2487				continue;
2488			}
2489		}
2490
2491		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2492		if (WARN_ON(ex.fe_len <= 0))
2493			break;
2494		if (free < ex.fe_len) {
2495			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2496					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2497			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2498					"%d free clusters as per "
2499					"group info. But got %d blocks",
2500					free, ex.fe_len);
2501			/*
2502			 * The number of free blocks differs. This mostly
2503			 * indicate that the bitmap is corrupt. So exit
2504			 * without claiming the space.
2505			 */
2506			break;
2507		}
2508		ex.fe_logical = 0xDEADC0DE; /* debug value */
2509		ext4_mb_measure_extent(ac, &ex, e4b);
2510
2511		i += ex.fe_len;
2512		free -= ex.fe_len;
2513	}
2514
2515	ext4_mb_check_limits(ac, e4b, 1);
2516}
2517
2518/*
2519 * This is a special case for storages like raid5
2520 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2521 */
2522static noinline_for_stack
2523void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2524				 struct ext4_buddy *e4b)
2525{
2526	struct super_block *sb = ac->ac_sb;
2527	struct ext4_sb_info *sbi = EXT4_SB(sb);
2528	void *bitmap = e4b->bd_bitmap;
2529	struct ext4_free_extent ex;
2530	ext4_fsblk_t first_group_block;
2531	ext4_fsblk_t a;
2532	ext4_grpblk_t i, stripe;
2533	int max;
2534
2535	BUG_ON(sbi->s_stripe == 0);
2536
2537	/* find first stripe-aligned block in group */
2538	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2539
2540	a = first_group_block + sbi->s_stripe - 1;
2541	do_div(a, sbi->s_stripe);
2542	i = (a * sbi->s_stripe) - first_group_block;
2543
2544	stripe = EXT4_B2C(sbi, sbi->s_stripe);
2545	i = EXT4_B2C(sbi, i);
2546	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2547		if (!mb_test_bit(i, bitmap)) {
2548			max = mb_find_extent(e4b, i, stripe, &ex);
2549			if (max >= stripe) {
2550				ac->ac_found++;
2551				ac->ac_cX_found[ac->ac_criteria]++;
2552				ex.fe_logical = 0xDEADF00D; /* debug value */
2553				ac->ac_b_ex = ex;
2554				ext4_mb_use_best_found(ac, e4b);
2555				break;
2556			}
2557		}
2558		i += stripe;
2559	}
2560}
2561
2562/*
2563 * This is also called BEFORE we load the buddy bitmap.
2564 * Returns either 1 or 0 indicating that the group is either suitable
2565 * for the allocation or not.
2566 */
2567static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2568				ext4_group_t group, enum criteria cr)
2569{
2570	ext4_grpblk_t free, fragments;
2571	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2572	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2573
2574	BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2575
2576	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2577		return false;
2578
2579	free = grp->bb_free;
2580	if (free == 0)
2581		return false;
2582
2583	fragments = grp->bb_fragments;
2584	if (fragments == 0)
2585		return false;
2586
2587	switch (cr) {
2588	case CR_POWER2_ALIGNED:
2589		BUG_ON(ac->ac_2order == 0);
2590
2591		/* Avoid using the first bg of a flexgroup for data files */
2592		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2593		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2594		    ((group % flex_size) == 0))
2595			return false;
2596
2597		if (free < ac->ac_g_ex.fe_len)
2598			return false;
2599
2600		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2601			return true;
2602
2603		if (grp->bb_largest_free_order < ac->ac_2order)
2604			return false;
2605
2606		return true;
2607	case CR_GOAL_LEN_FAST:
2608	case CR_BEST_AVAIL_LEN:
2609		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2610			return true;
2611		break;
2612	case CR_GOAL_LEN_SLOW:
2613		if (free >= ac->ac_g_ex.fe_len)
2614			return true;
2615		break;
2616	case CR_ANY_FREE:
2617		return true;
2618	default:
2619		BUG();
2620	}
2621
2622	return false;
2623}
2624
2625/*
2626 * This could return negative error code if something goes wrong
2627 * during ext4_mb_init_group(). This should not be called with
2628 * ext4_lock_group() held.
2629 *
2630 * Note: because we are conditionally operating with the group lock in
2631 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2632 * function using __acquire and __release.  This means we need to be
2633 * super careful before messing with the error path handling via "goto
2634 * out"!
2635 */
2636static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2637				     ext4_group_t group, enum criteria cr)
2638{
2639	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2640	struct super_block *sb = ac->ac_sb;
2641	struct ext4_sb_info *sbi = EXT4_SB(sb);
2642	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2643	ext4_grpblk_t free;
2644	int ret = 0;
2645
2646	if (!grp)
2647		return -EFSCORRUPTED;
2648	if (sbi->s_mb_stats)
2649		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2650	if (should_lock) {
2651		ext4_lock_group(sb, group);
2652		__release(ext4_group_lock_ptr(sb, group));
2653	}
2654	free = grp->bb_free;
2655	if (free == 0)
2656		goto out;
2657	/*
2658	 * In all criterias except CR_ANY_FREE we try to avoid groups that
2659	 * can't possibly satisfy the full goal request due to insufficient
2660	 * free blocks.
2661	 */
2662	if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2663		goto out;
2664	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2665		goto out;
2666	if (should_lock) {
2667		__acquire(ext4_group_lock_ptr(sb, group));
2668		ext4_unlock_group(sb, group);
2669	}
2670
2671	/* We only do this if the grp has never been initialized */
2672	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2673		struct ext4_group_desc *gdp =
2674			ext4_get_group_desc(sb, group, NULL);
2675		int ret;
2676
2677		/*
2678		 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2679		 * search to find large good chunks almost for free. If buddy
2680		 * data is not ready, then this optimization makes no sense. But
2681		 * we never skip the first block group in a flex_bg, since this
2682		 * gets used for metadata block allocation, and we want to make
2683		 * sure we locate metadata blocks in the first block group in
2684		 * the flex_bg if possible.
2685		 */
2686		if (!ext4_mb_cr_expensive(cr) &&
2687		    (!sbi->s_log_groups_per_flex ||
2688		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2689		    !(ext4_has_group_desc_csum(sb) &&
2690		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2691			return 0;
2692		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2693		if (ret)
2694			return ret;
2695	}
2696
2697	if (should_lock) {
2698		ext4_lock_group(sb, group);
2699		__release(ext4_group_lock_ptr(sb, group));
2700	}
2701	ret = ext4_mb_good_group(ac, group, cr);
2702out:
2703	if (should_lock) {
2704		__acquire(ext4_group_lock_ptr(sb, group));
2705		ext4_unlock_group(sb, group);
2706	}
2707	return ret;
2708}
2709
2710/*
2711 * Start prefetching @nr block bitmaps starting at @group.
2712 * Return the next group which needs to be prefetched.
2713 */
2714ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2715			      unsigned int nr, int *cnt)
2716{
2717	ext4_group_t ngroups = ext4_get_groups_count(sb);
2718	struct buffer_head *bh;
2719	struct blk_plug plug;
2720
2721	blk_start_plug(&plug);
2722	while (nr-- > 0) {
2723		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2724								  NULL);
2725		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2726
2727		/*
2728		 * Prefetch block groups with free blocks; but don't
2729		 * bother if it is marked uninitialized on disk, since
2730		 * it won't require I/O to read.  Also only try to
2731		 * prefetch once, so we avoid getblk() call, which can
2732		 * be expensive.
2733		 */
2734		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2735		    EXT4_MB_GRP_NEED_INIT(grp) &&
2736		    ext4_free_group_clusters(sb, gdp) > 0 ) {
2737			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2738			if (bh && !IS_ERR(bh)) {
2739				if (!buffer_uptodate(bh) && cnt)
2740					(*cnt)++;
2741				brelse(bh);
2742			}
2743		}
2744		if (++group >= ngroups)
2745			group = 0;
2746	}
2747	blk_finish_plug(&plug);
2748	return group;
2749}
2750
2751/*
2752 * Prefetching reads the block bitmap into the buffer cache; but we
2753 * need to make sure that the buddy bitmap in the page cache has been
2754 * initialized.  Note that ext4_mb_init_group() will block if the I/O
2755 * is not yet completed, or indeed if it was not initiated by
2756 * ext4_mb_prefetch did not start the I/O.
2757 *
2758 * TODO: We should actually kick off the buddy bitmap setup in a work
2759 * queue when the buffer I/O is completed, so that we don't block
2760 * waiting for the block allocation bitmap read to finish when
2761 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2762 */
2763void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2764			   unsigned int nr)
2765{
2766	struct ext4_group_desc *gdp;
2767	struct ext4_group_info *grp;
2768
2769	while (nr-- > 0) {
2770		if (!group)
2771			group = ext4_get_groups_count(sb);
2772		group--;
2773		gdp = ext4_get_group_desc(sb, group, NULL);
2774		grp = ext4_get_group_info(sb, group);
2775
2776		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2777		    ext4_free_group_clusters(sb, gdp) > 0) {
2778			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2779				break;
2780		}
2781	}
2782}
2783
2784static noinline_for_stack int
2785ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2786{
2787	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2788	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2789	int err = 0, first_err = 0;
2790	unsigned int nr = 0, prefetch_ios = 0;
2791	struct ext4_sb_info *sbi;
2792	struct super_block *sb;
2793	struct ext4_buddy e4b;
2794	int lost;
2795
2796	sb = ac->ac_sb;
2797	sbi = EXT4_SB(sb);
2798	ngroups = ext4_get_groups_count(sb);
2799	/* non-extent files are limited to low blocks/groups */
2800	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2801		ngroups = sbi->s_blockfile_groups;
2802
2803	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2804
2805	/* first, try the goal */
2806	err = ext4_mb_find_by_goal(ac, &e4b);
2807	if (err || ac->ac_status == AC_STATUS_FOUND)
2808		goto out;
2809
2810	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2811		goto out;
2812
2813	/*
2814	 * ac->ac_2order is set only if the fe_len is a power of 2
2815	 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
2816	 * so that we try exact allocation using buddy.
2817	 */
2818	i = fls(ac->ac_g_ex.fe_len);
2819	ac->ac_2order = 0;
2820	/*
2821	 * We search using buddy data only if the order of the request
2822	 * is greater than equal to the sbi_s_mb_order2_reqs
2823	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2824	 * We also support searching for power-of-two requests only for
2825	 * requests upto maximum buddy size we have constructed.
2826	 */
2827	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2828		if (is_power_of_2(ac->ac_g_ex.fe_len))
2829			ac->ac_2order = array_index_nospec(i - 1,
2830							   MB_NUM_ORDERS(sb));
2831	}
2832
2833	/* if stream allocation is enabled, use global goal */
2834	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2835		/* TBD: may be hot point */
2836		spin_lock(&sbi->s_md_lock);
2837		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2838		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2839		spin_unlock(&sbi->s_md_lock);
2840	}
2841
2842	/*
2843	 * Let's just scan groups to find more-less suitable blocks We
2844	 * start with CR_GOAL_LEN_FAST, unless it is power of 2
2845	 * aligned, in which case let's do that faster approach first.
2846	 */
2847	if (ac->ac_2order)
2848		cr = CR_POWER2_ALIGNED;
2849repeat:
2850	for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2851		ac->ac_criteria = cr;
2852		/*
2853		 * searching for the right group start
2854		 * from the goal value specified
2855		 */
2856		group = ac->ac_g_ex.fe_group;
2857		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2858		prefetch_grp = group;
 
2859
2860		for (i = 0, new_cr = cr; i < ngroups; i++,
2861		     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2862			int ret = 0;
2863
2864			cond_resched();
2865			if (new_cr != cr) {
2866				cr = new_cr;
2867				goto repeat;
2868			}
2869
2870			/*
2871			 * Batch reads of the block allocation bitmaps
2872			 * to get multiple READs in flight; limit
2873			 * prefetching at inexpensive CR, otherwise mballoc
2874			 * can spend a lot of time loading imperfect groups
2875			 */
2876			if ((prefetch_grp == group) &&
2877			    (ext4_mb_cr_expensive(cr) ||
2878			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2879				nr = sbi->s_mb_prefetch;
2880				if (ext4_has_feature_flex_bg(sb)) {
2881					nr = 1 << sbi->s_log_groups_per_flex;
2882					nr -= group & (nr - 1);
2883					nr = min(nr, sbi->s_mb_prefetch);
2884				}
2885				prefetch_grp = ext4_mb_prefetch(sb, group,
2886							nr, &prefetch_ios);
2887			}
2888
2889			/* This now checks without needing the buddy page */
2890			ret = ext4_mb_good_group_nolock(ac, group, cr);
2891			if (ret <= 0) {
2892				if (!first_err)
2893					first_err = ret;
2894				continue;
2895			}
2896
2897			err = ext4_mb_load_buddy(sb, group, &e4b);
2898			if (err)
2899				goto out;
2900
2901			ext4_lock_group(sb, group);
2902
2903			/*
2904			 * We need to check again after locking the
2905			 * block group
2906			 */
2907			ret = ext4_mb_good_group(ac, group, cr);
2908			if (ret == 0) {
2909				ext4_unlock_group(sb, group);
2910				ext4_mb_unload_buddy(&e4b);
2911				continue;
2912			}
2913
2914			ac->ac_groups_scanned++;
2915			if (cr == CR_POWER2_ALIGNED)
2916				ext4_mb_simple_scan_group(ac, &e4b);
2917			else {
2918				bool is_stripe_aligned = sbi->s_stripe &&
 
 
2919					!(ac->ac_g_ex.fe_len %
2920					  EXT4_B2C(sbi, sbi->s_stripe));
2921
2922				if ((cr == CR_GOAL_LEN_FAST ||
2923				     cr == CR_BEST_AVAIL_LEN) &&
2924				    is_stripe_aligned)
2925					ext4_mb_scan_aligned(ac, &e4b);
2926
2927				if (ac->ac_status == AC_STATUS_CONTINUE)
2928					ext4_mb_complex_scan_group(ac, &e4b);
2929			}
2930
2931			ext4_unlock_group(sb, group);
2932			ext4_mb_unload_buddy(&e4b);
2933
2934			if (ac->ac_status != AC_STATUS_CONTINUE)
2935				break;
2936		}
2937		/* Processed all groups and haven't found blocks */
2938		if (sbi->s_mb_stats && i == ngroups)
2939			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2940
2941		if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2942			/* Reset goal length to original goal length before
2943			 * falling into CR_GOAL_LEN_SLOW */
2944			ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2945	}
2946
2947	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2948	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2949		/*
2950		 * We've been searching too long. Let's try to allocate
2951		 * the best chunk we've found so far
2952		 */
2953		ext4_mb_try_best_found(ac, &e4b);
2954		if (ac->ac_status != AC_STATUS_FOUND) {
2955			/*
2956			 * Someone more lucky has already allocated it.
2957			 * The only thing we can do is just take first
2958			 * found block(s)
2959			 */
2960			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2961			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2962				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2963				 ac->ac_b_ex.fe_len, lost);
2964
2965			ac->ac_b_ex.fe_group = 0;
2966			ac->ac_b_ex.fe_start = 0;
2967			ac->ac_b_ex.fe_len = 0;
2968			ac->ac_status = AC_STATUS_CONTINUE;
2969			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2970			cr = CR_ANY_FREE;
2971			goto repeat;
2972		}
2973	}
2974
2975	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2976		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2977out:
2978	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2979		err = first_err;
2980
2981	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2982		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2983		 ac->ac_flags, cr, err);
2984
2985	if (nr)
2986		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2987
2988	return err;
2989}
2990
2991static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2992{
2993	struct super_block *sb = pde_data(file_inode(seq->file));
2994	ext4_group_t group;
2995
2996	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2997		return NULL;
2998	group = *pos + 1;
2999	return (void *) ((unsigned long) group);
3000}
3001
3002static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
3003{
3004	struct super_block *sb = pde_data(file_inode(seq->file));
3005	ext4_group_t group;
3006
3007	++*pos;
3008	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3009		return NULL;
3010	group = *pos + 1;
3011	return (void *) ((unsigned long) group);
3012}
3013
3014static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
3015{
3016	struct super_block *sb = pde_data(file_inode(seq->file));
3017	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
3018	int i, err;
3019	char nbuf[16];
3020	struct ext4_buddy e4b;
3021	struct ext4_group_info *grinfo;
3022	unsigned char blocksize_bits = min_t(unsigned char,
3023					     sb->s_blocksize_bits,
3024					     EXT4_MAX_BLOCK_LOG_SIZE);
3025	struct sg {
3026		struct ext4_group_info info;
3027		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3028	} sg;
3029
3030	group--;
3031	if (group == 0)
3032		seq_puts(seq, "#group: free  frags first ["
3033			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
3034			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
3035
3036	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3037		sizeof(struct ext4_group_info);
3038
3039	grinfo = ext4_get_group_info(sb, group);
3040	if (!grinfo)
3041		return 0;
3042	/* Load the group info in memory only if not already loaded. */
3043	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3044		err = ext4_mb_load_buddy(sb, group, &e4b);
3045		if (err) {
3046			seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf));
3047			return 0;
3048		}
3049		ext4_mb_unload_buddy(&e4b);
3050	}
3051
3052	/*
3053	 * We care only about free space counters in the group info and
3054	 * these are safe to access even after the buddy has been unloaded
3055	 */
3056	memcpy(&sg, grinfo, i);
3057	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3058			sg.info.bb_fragments, sg.info.bb_first_free);
3059	for (i = 0; i <= 13; i++)
3060		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3061				sg.info.bb_counters[i] : 0);
3062	seq_puts(seq, " ]");
3063	if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
3064		seq_puts(seq, " Block bitmap corrupted!");
3065	seq_puts(seq, "\n");
3066
3067	return 0;
3068}
3069
3070static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3071{
3072}
3073
3074const struct seq_operations ext4_mb_seq_groups_ops = {
3075	.start  = ext4_mb_seq_groups_start,
3076	.next   = ext4_mb_seq_groups_next,
3077	.stop   = ext4_mb_seq_groups_stop,
3078	.show   = ext4_mb_seq_groups_show,
3079};
3080
3081int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3082{
3083	struct super_block *sb = seq->private;
3084	struct ext4_sb_info *sbi = EXT4_SB(sb);
3085
3086	seq_puts(seq, "mballoc:\n");
3087	if (!sbi->s_mb_stats) {
3088		seq_puts(seq, "\tmb stats collection turned off.\n");
3089		seq_puts(
3090			seq,
3091			"\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3092		return 0;
3093	}
3094	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3095	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3096
3097	seq_printf(seq, "\tgroups_scanned: %u\n",
3098		   atomic_read(&sbi->s_bal_groups_scanned));
3099
3100	/* CR_POWER2_ALIGNED stats */
3101	seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3102	seq_printf(seq, "\t\thits: %llu\n",
3103		   atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3104	seq_printf(
3105		seq, "\t\tgroups_considered: %llu\n",
3106		atomic64_read(
3107			&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3108	seq_printf(seq, "\t\textents_scanned: %u\n",
3109		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3110	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3111		   atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3112	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3113		   atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3114
3115	/* CR_GOAL_LEN_FAST stats */
3116	seq_puts(seq, "\tcr_goal_fast_stats:\n");
3117	seq_printf(seq, "\t\thits: %llu\n",
3118		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3119	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3120		   atomic64_read(
3121			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3122	seq_printf(seq, "\t\textents_scanned: %u\n",
3123		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3124	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3125		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3126	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3127		   atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3128
3129	/* CR_BEST_AVAIL_LEN stats */
3130	seq_puts(seq, "\tcr_best_avail_stats:\n");
3131	seq_printf(seq, "\t\thits: %llu\n",
3132		   atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3133	seq_printf(
3134		seq, "\t\tgroups_considered: %llu\n",
3135		atomic64_read(
3136			&sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3137	seq_printf(seq, "\t\textents_scanned: %u\n",
3138		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3139	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3140		   atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3141	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3142		   atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3143
3144	/* CR_GOAL_LEN_SLOW stats */
3145	seq_puts(seq, "\tcr_goal_slow_stats:\n");
3146	seq_printf(seq, "\t\thits: %llu\n",
3147		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3148	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3149		   atomic64_read(
3150			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3151	seq_printf(seq, "\t\textents_scanned: %u\n",
3152		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3153	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3154		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3155
3156	/* CR_ANY_FREE stats */
3157	seq_puts(seq, "\tcr_any_free_stats:\n");
3158	seq_printf(seq, "\t\thits: %llu\n",
3159		   atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3160	seq_printf(
3161		seq, "\t\tgroups_considered: %llu\n",
3162		atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3163	seq_printf(seq, "\t\textents_scanned: %u\n",
3164		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3165	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3166		   atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3167
3168	/* Aggregates */
3169	seq_printf(seq, "\textents_scanned: %u\n",
3170		   atomic_read(&sbi->s_bal_ex_scanned));
3171	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3172	seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3173		   atomic_read(&sbi->s_bal_len_goals));
3174	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3175	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3176	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3177	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3178		   atomic_read(&sbi->s_mb_buddies_generated),
3179		   ext4_get_groups_count(sb));
3180	seq_printf(seq, "\tbuddies_time_used: %llu\n",
3181		   atomic64_read(&sbi->s_mb_generation_time));
3182	seq_printf(seq, "\tpreallocated: %u\n",
3183		   atomic_read(&sbi->s_mb_preallocated));
3184	seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3185	return 0;
3186}
3187
3188static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3189__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
3190{
3191	struct super_block *sb = pde_data(file_inode(seq->file));
3192	unsigned long position;
3193
3194	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3195		return NULL;
3196	position = *pos + 1;
3197	return (void *) ((unsigned long) position);
3198}
3199
3200static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3201{
3202	struct super_block *sb = pde_data(file_inode(seq->file));
3203	unsigned long position;
3204
3205	++*pos;
3206	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3207		return NULL;
3208	position = *pos + 1;
3209	return (void *) ((unsigned long) position);
3210}
3211
3212static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3213{
3214	struct super_block *sb = pde_data(file_inode(seq->file));
3215	struct ext4_sb_info *sbi = EXT4_SB(sb);
3216	unsigned long position = ((unsigned long) v);
3217	struct ext4_group_info *grp;
3218	unsigned int count;
3219
3220	position--;
3221	if (position >= MB_NUM_ORDERS(sb)) {
3222		position -= MB_NUM_ORDERS(sb);
3223		if (position == 0)
3224			seq_puts(seq, "avg_fragment_size_lists:\n");
3225
3226		count = 0;
3227		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3228		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3229				    bb_avg_fragment_size_node)
3230			count++;
3231		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3232		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3233					(unsigned int)position, count);
3234		return 0;
3235	}
3236
3237	if (position == 0) {
3238		seq_printf(seq, "optimize_scan: %d\n",
3239			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3240		seq_puts(seq, "max_free_order_lists:\n");
3241	}
3242	count = 0;
3243	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3244	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3245			    bb_largest_free_order_node)
3246		count++;
3247	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3248	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3249		   (unsigned int)position, count);
3250
3251	return 0;
3252}
3253
3254static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3255{
3256}
3257
3258const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3259	.start  = ext4_mb_seq_structs_summary_start,
3260	.next   = ext4_mb_seq_structs_summary_next,
3261	.stop   = ext4_mb_seq_structs_summary_stop,
3262	.show   = ext4_mb_seq_structs_summary_show,
3263};
3264
3265static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3266{
3267	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3268	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3269
3270	BUG_ON(!cachep);
3271	return cachep;
3272}
3273
3274/*
3275 * Allocate the top-level s_group_info array for the specified number
3276 * of groups
3277 */
3278int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3279{
3280	struct ext4_sb_info *sbi = EXT4_SB(sb);
3281	unsigned size;
3282	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3283
3284	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3285		EXT4_DESC_PER_BLOCK_BITS(sb);
3286	if (size <= sbi->s_group_info_size)
3287		return 0;
3288
3289	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3290	new_groupinfo = kvzalloc(size, GFP_KERNEL);
3291	if (!new_groupinfo) {
3292		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3293		return -ENOMEM;
3294	}
3295	rcu_read_lock();
3296	old_groupinfo = rcu_dereference(sbi->s_group_info);
3297	if (old_groupinfo)
3298		memcpy(new_groupinfo, old_groupinfo,
3299		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3300	rcu_read_unlock();
3301	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3302	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3303	if (old_groupinfo)
3304		ext4_kvfree_array_rcu(old_groupinfo);
3305	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3306		   sbi->s_group_info_size);
3307	return 0;
3308}
3309
3310/* Create and initialize ext4_group_info data for the given group. */
3311int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3312			  struct ext4_group_desc *desc)
3313{
3314	int i;
3315	int metalen = 0;
3316	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3317	struct ext4_sb_info *sbi = EXT4_SB(sb);
3318	struct ext4_group_info **meta_group_info;
3319	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3320
3321	/*
3322	 * First check if this group is the first of a reserved block.
3323	 * If it's true, we have to allocate a new table of pointers
3324	 * to ext4_group_info structures
3325	 */
3326	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3327		metalen = sizeof(*meta_group_info) <<
3328			EXT4_DESC_PER_BLOCK_BITS(sb);
3329		meta_group_info = kmalloc(metalen, GFP_NOFS);
3330		if (meta_group_info == NULL) {
3331			ext4_msg(sb, KERN_ERR, "can't allocate mem "
3332				 "for a buddy group");
3333			return -ENOMEM;
3334		}
3335		rcu_read_lock();
3336		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3337		rcu_read_unlock();
3338	}
3339
3340	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3341	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3342
3343	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3344	if (meta_group_info[i] == NULL) {
3345		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3346		goto exit_group_info;
3347	}
3348	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3349		&(meta_group_info[i]->bb_state));
3350
3351	/*
3352	 * initialize bb_free to be able to skip
3353	 * empty groups without initialization
3354	 */
3355	if (ext4_has_group_desc_csum(sb) &&
3356	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3357		meta_group_info[i]->bb_free =
3358			ext4_free_clusters_after_init(sb, group, desc);
3359	} else {
3360		meta_group_info[i]->bb_free =
3361			ext4_free_group_clusters(sb, desc);
3362	}
3363
3364	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3365	init_rwsem(&meta_group_info[i]->alloc_sem);
3366	meta_group_info[i]->bb_free_root = RB_ROOT;
3367	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3368	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3369	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3370	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3371	meta_group_info[i]->bb_group = group;
3372
3373	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3374	return 0;
3375
3376exit_group_info:
3377	/* If a meta_group_info table has been allocated, release it now */
3378	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3379		struct ext4_group_info ***group_info;
3380
3381		rcu_read_lock();
3382		group_info = rcu_dereference(sbi->s_group_info);
3383		kfree(group_info[idx]);
3384		group_info[idx] = NULL;
3385		rcu_read_unlock();
3386	}
3387	return -ENOMEM;
3388} /* ext4_mb_add_groupinfo */
3389
3390static int ext4_mb_init_backend(struct super_block *sb)
3391{
3392	ext4_group_t ngroups = ext4_get_groups_count(sb);
3393	ext4_group_t i;
3394	struct ext4_sb_info *sbi = EXT4_SB(sb);
3395	int err;
3396	struct ext4_group_desc *desc;
3397	struct ext4_group_info ***group_info;
3398	struct kmem_cache *cachep;
3399
3400	err = ext4_mb_alloc_groupinfo(sb, ngroups);
3401	if (err)
3402		return err;
3403
3404	sbi->s_buddy_cache = new_inode(sb);
3405	if (sbi->s_buddy_cache == NULL) {
3406		ext4_msg(sb, KERN_ERR, "can't get new inode");
3407		goto err_freesgi;
3408	}
3409	/* To avoid potentially colliding with an valid on-disk inode number,
3410	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3411	 * not in the inode hash, so it should never be found by iget(), but
3412	 * this will avoid confusion if it ever shows up during debugging. */
3413	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3414	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3415	for (i = 0; i < ngroups; i++) {
3416		cond_resched();
3417		desc = ext4_get_group_desc(sb, i, NULL);
3418		if (desc == NULL) {
3419			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3420			goto err_freebuddy;
3421		}
3422		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3423			goto err_freebuddy;
3424	}
3425
3426	if (ext4_has_feature_flex_bg(sb)) {
3427		/* a single flex group is supposed to be read by a single IO.
3428		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3429		 * unsigned integer, so the maximum shift is 32.
3430		 */
3431		if (sbi->s_es->s_log_groups_per_flex >= 32) {
3432			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3433			goto err_freebuddy;
3434		}
3435		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3436			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3437		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3438	} else {
3439		sbi->s_mb_prefetch = 32;
3440	}
3441	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3442		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3443	/* now many real IOs to prefetch within a single allocation at cr=0
3444	 * given cr=0 is an CPU-related optimization we shouldn't try to
3445	 * load too many groups, at some point we should start to use what
3446	 * we've got in memory.
 
3447	 * with an average random access time 5ms, it'd take a second to get
3448	 * 200 groups (* N with flex_bg), so let's make this limit 4
3449	 */
3450	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3451	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3452		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3453
3454	return 0;
3455
3456err_freebuddy:
3457	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3458	while (i-- > 0) {
3459		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3460
3461		if (grp)
3462			kmem_cache_free(cachep, grp);
3463	}
3464	i = sbi->s_group_info_size;
3465	rcu_read_lock();
3466	group_info = rcu_dereference(sbi->s_group_info);
3467	while (i-- > 0)
3468		kfree(group_info[i]);
3469	rcu_read_unlock();
3470	iput(sbi->s_buddy_cache);
3471err_freesgi:
3472	rcu_read_lock();
3473	kvfree(rcu_dereference(sbi->s_group_info));
3474	rcu_read_unlock();
3475	return -ENOMEM;
3476}
3477
3478static void ext4_groupinfo_destroy_slabs(void)
3479{
3480	int i;
3481
3482	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3483		kmem_cache_destroy(ext4_groupinfo_caches[i]);
3484		ext4_groupinfo_caches[i] = NULL;
3485	}
3486}
3487
3488static int ext4_groupinfo_create_slab(size_t size)
3489{
3490	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3491	int slab_size;
3492	int blocksize_bits = order_base_2(size);
3493	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3494	struct kmem_cache *cachep;
3495
3496	if (cache_index >= NR_GRPINFO_CACHES)
3497		return -EINVAL;
3498
3499	if (unlikely(cache_index < 0))
3500		cache_index = 0;
3501
3502	mutex_lock(&ext4_grpinfo_slab_create_mutex);
3503	if (ext4_groupinfo_caches[cache_index]) {
3504		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3505		return 0;	/* Already created */
3506	}
3507
3508	slab_size = offsetof(struct ext4_group_info,
3509				bb_counters[blocksize_bits + 2]);
3510
3511	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3512					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3513					NULL);
3514
3515	ext4_groupinfo_caches[cache_index] = cachep;
3516
3517	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3518	if (!cachep) {
3519		printk(KERN_EMERG
3520		       "EXT4-fs: no memory for groupinfo slab cache\n");
3521		return -ENOMEM;
3522	}
3523
3524	return 0;
3525}
3526
3527static void ext4_discard_work(struct work_struct *work)
3528{
3529	struct ext4_sb_info *sbi = container_of(work,
3530			struct ext4_sb_info, s_discard_work);
3531	struct super_block *sb = sbi->s_sb;
3532	struct ext4_free_data *fd, *nfd;
3533	struct ext4_buddy e4b;
3534	LIST_HEAD(discard_list);
3535	ext4_group_t grp, load_grp;
3536	int err = 0;
3537
3538	spin_lock(&sbi->s_md_lock);
3539	list_splice_init(&sbi->s_discard_list, &discard_list);
3540	spin_unlock(&sbi->s_md_lock);
3541
3542	load_grp = UINT_MAX;
3543	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3544		/*
3545		 * If filesystem is umounting or no memory or suffering
3546		 * from no space, give up the discard
3547		 */
3548		if ((sb->s_flags & SB_ACTIVE) && !err &&
3549		    !atomic_read(&sbi->s_retry_alloc_pending)) {
3550			grp = fd->efd_group;
3551			if (grp != load_grp) {
3552				if (load_grp != UINT_MAX)
3553					ext4_mb_unload_buddy(&e4b);
3554
3555				err = ext4_mb_load_buddy(sb, grp, &e4b);
3556				if (err) {
3557					kmem_cache_free(ext4_free_data_cachep, fd);
3558					load_grp = UINT_MAX;
3559					continue;
3560				} else {
3561					load_grp = grp;
3562				}
3563			}
3564
3565			ext4_lock_group(sb, grp);
3566			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3567						fd->efd_start_cluster + fd->efd_count - 1, 1);
3568			ext4_unlock_group(sb, grp);
3569		}
3570		kmem_cache_free(ext4_free_data_cachep, fd);
3571	}
3572
3573	if (load_grp != UINT_MAX)
3574		ext4_mb_unload_buddy(&e4b);
3575}
3576
3577int ext4_mb_init(struct super_block *sb)
3578{
3579	struct ext4_sb_info *sbi = EXT4_SB(sb);
3580	unsigned i, j;
3581	unsigned offset, offset_incr;
3582	unsigned max;
3583	int ret;
3584
3585	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3586
3587	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3588	if (sbi->s_mb_offsets == NULL) {
3589		ret = -ENOMEM;
3590		goto out;
3591	}
3592
3593	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3594	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3595	if (sbi->s_mb_maxs == NULL) {
3596		ret = -ENOMEM;
3597		goto out;
3598	}
3599
3600	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3601	if (ret < 0)
3602		goto out;
3603
3604	/* order 0 is regular bitmap */
3605	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3606	sbi->s_mb_offsets[0] = 0;
3607
3608	i = 1;
3609	offset = 0;
3610	offset_incr = 1 << (sb->s_blocksize_bits - 1);
3611	max = sb->s_blocksize << 2;
3612	do {
3613		sbi->s_mb_offsets[i] = offset;
3614		sbi->s_mb_maxs[i] = max;
3615		offset += offset_incr;
3616		offset_incr = offset_incr >> 1;
3617		max = max >> 1;
3618		i++;
3619	} while (i < MB_NUM_ORDERS(sb));
3620
3621	sbi->s_mb_avg_fragment_size =
3622		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3623			GFP_KERNEL);
3624	if (!sbi->s_mb_avg_fragment_size) {
3625		ret = -ENOMEM;
3626		goto out;
3627	}
3628	sbi->s_mb_avg_fragment_size_locks =
3629		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3630			GFP_KERNEL);
3631	if (!sbi->s_mb_avg_fragment_size_locks) {
3632		ret = -ENOMEM;
3633		goto out;
3634	}
3635	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3636		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3637		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3638	}
3639	sbi->s_mb_largest_free_orders =
3640		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3641			GFP_KERNEL);
3642	if (!sbi->s_mb_largest_free_orders) {
3643		ret = -ENOMEM;
3644		goto out;
3645	}
3646	sbi->s_mb_largest_free_orders_locks =
3647		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3648			GFP_KERNEL);
3649	if (!sbi->s_mb_largest_free_orders_locks) {
3650		ret = -ENOMEM;
3651		goto out;
3652	}
3653	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3654		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3655		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3656	}
3657
3658	spin_lock_init(&sbi->s_md_lock);
3659	sbi->s_mb_free_pending = 0;
3660	INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3661	INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3662	INIT_LIST_HEAD(&sbi->s_discard_list);
3663	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3664	atomic_set(&sbi->s_retry_alloc_pending, 0);
3665
3666	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3667	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3668	sbi->s_mb_stats = MB_DEFAULT_STATS;
3669	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3670	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3671	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3672
3673	/*
3674	 * The default group preallocation is 512, which for 4k block
3675	 * sizes translates to 2 megabytes.  However for bigalloc file
3676	 * systems, this is probably too big (i.e, if the cluster size
3677	 * is 1 megabyte, then group preallocation size becomes half a
3678	 * gigabyte!).  As a default, we will keep a two megabyte
3679	 * group pralloc size for cluster sizes up to 64k, and after
3680	 * that, we will force a minimum group preallocation size of
3681	 * 32 clusters.  This translates to 8 megs when the cluster
3682	 * size is 256k, and 32 megs when the cluster size is 1 meg,
3683	 * which seems reasonable as a default.
3684	 */
3685	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3686				       sbi->s_cluster_bits, 32);
3687	/*
3688	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3689	 * to the lowest multiple of s_stripe which is bigger than
3690	 * the s_mb_group_prealloc as determined above. We want
3691	 * the preallocation size to be an exact multiple of the
3692	 * RAID stripe size so that preallocations don't fragment
3693	 * the stripes.
3694	 */
3695	if (sbi->s_stripe > 1) {
3696		sbi->s_mb_group_prealloc = roundup(
3697			sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
3698	}
3699
3700	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3701	if (sbi->s_locality_groups == NULL) {
3702		ret = -ENOMEM;
3703		goto out;
3704	}
3705	for_each_possible_cpu(i) {
3706		struct ext4_locality_group *lg;
3707		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3708		mutex_init(&lg->lg_mutex);
3709		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3710			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3711		spin_lock_init(&lg->lg_prealloc_lock);
3712	}
3713
3714	if (bdev_nonrot(sb->s_bdev))
3715		sbi->s_mb_max_linear_groups = 0;
3716	else
3717		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3718	/* init file for buddy data */
3719	ret = ext4_mb_init_backend(sb);
3720	if (ret != 0)
3721		goto out_free_locality_groups;
3722
3723	return 0;
3724
3725out_free_locality_groups:
3726	free_percpu(sbi->s_locality_groups);
3727	sbi->s_locality_groups = NULL;
3728out:
3729	kfree(sbi->s_mb_avg_fragment_size);
3730	kfree(sbi->s_mb_avg_fragment_size_locks);
3731	kfree(sbi->s_mb_largest_free_orders);
3732	kfree(sbi->s_mb_largest_free_orders_locks);
3733	kfree(sbi->s_mb_offsets);
3734	sbi->s_mb_offsets = NULL;
3735	kfree(sbi->s_mb_maxs);
3736	sbi->s_mb_maxs = NULL;
3737	return ret;
3738}
3739
3740/* need to called with the ext4 group lock held */
3741static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3742{
3743	struct ext4_prealloc_space *pa;
3744	struct list_head *cur, *tmp;
3745	int count = 0;
3746
3747	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3748		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3749		list_del(&pa->pa_group_list);
3750		count++;
3751		kmem_cache_free(ext4_pspace_cachep, pa);
3752	}
3753	return count;
3754}
3755
3756void ext4_mb_release(struct super_block *sb)
3757{
3758	ext4_group_t ngroups = ext4_get_groups_count(sb);
3759	ext4_group_t i;
3760	int num_meta_group_infos;
3761	struct ext4_group_info *grinfo, ***group_info;
3762	struct ext4_sb_info *sbi = EXT4_SB(sb);
3763	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3764	int count;
3765
3766	if (test_opt(sb, DISCARD)) {
3767		/*
3768		 * wait the discard work to drain all of ext4_free_data
3769		 */
3770		flush_work(&sbi->s_discard_work);
3771		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3772	}
3773
3774	if (sbi->s_group_info) {
3775		for (i = 0; i < ngroups; i++) {
3776			cond_resched();
3777			grinfo = ext4_get_group_info(sb, i);
3778			if (!grinfo)
3779				continue;
3780			mb_group_bb_bitmap_free(grinfo);
3781			ext4_lock_group(sb, i);
3782			count = ext4_mb_cleanup_pa(grinfo);
3783			if (count)
3784				mb_debug(sb, "mballoc: %d PAs left\n",
3785					 count);
3786			ext4_unlock_group(sb, i);
3787			kmem_cache_free(cachep, grinfo);
3788		}
3789		num_meta_group_infos = (ngroups +
3790				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3791			EXT4_DESC_PER_BLOCK_BITS(sb);
3792		rcu_read_lock();
3793		group_info = rcu_dereference(sbi->s_group_info);
3794		for (i = 0; i < num_meta_group_infos; i++)
3795			kfree(group_info[i]);
3796		kvfree(group_info);
3797		rcu_read_unlock();
3798	}
3799	kfree(sbi->s_mb_avg_fragment_size);
3800	kfree(sbi->s_mb_avg_fragment_size_locks);
3801	kfree(sbi->s_mb_largest_free_orders);
3802	kfree(sbi->s_mb_largest_free_orders_locks);
3803	kfree(sbi->s_mb_offsets);
3804	kfree(sbi->s_mb_maxs);
3805	iput(sbi->s_buddy_cache);
3806	if (sbi->s_mb_stats) {
3807		ext4_msg(sb, KERN_INFO,
3808		       "mballoc: %u blocks %u reqs (%u success)",
3809				atomic_read(&sbi->s_bal_allocated),
3810				atomic_read(&sbi->s_bal_reqs),
3811				atomic_read(&sbi->s_bal_success));
3812		ext4_msg(sb, KERN_INFO,
3813		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3814				"%u 2^N hits, %u breaks, %u lost",
3815				atomic_read(&sbi->s_bal_ex_scanned),
3816				atomic_read(&sbi->s_bal_groups_scanned),
3817				atomic_read(&sbi->s_bal_goals),
3818				atomic_read(&sbi->s_bal_2orders),
3819				atomic_read(&sbi->s_bal_breaks),
3820				atomic_read(&sbi->s_mb_lost_chunks));
3821		ext4_msg(sb, KERN_INFO,
3822		       "mballoc: %u generated and it took %llu",
3823				atomic_read(&sbi->s_mb_buddies_generated),
3824				atomic64_read(&sbi->s_mb_generation_time));
3825		ext4_msg(sb, KERN_INFO,
3826		       "mballoc: %u preallocated, %u discarded",
3827				atomic_read(&sbi->s_mb_preallocated),
3828				atomic_read(&sbi->s_mb_discarded));
3829	}
3830
3831	free_percpu(sbi->s_locality_groups);
3832}
3833
3834static inline int ext4_issue_discard(struct super_block *sb,
3835		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
3836{
3837	ext4_fsblk_t discard_block;
3838
3839	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3840			 ext4_group_first_block_no(sb, block_group));
3841	count = EXT4_C2B(EXT4_SB(sb), count);
3842	trace_ext4_discard_blocks(sb,
3843			(unsigned long long) discard_block, count);
3844
3845	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3846}
3847
3848static void ext4_free_data_in_buddy(struct super_block *sb,
3849				    struct ext4_free_data *entry)
3850{
3851	struct ext4_buddy e4b;
3852	struct ext4_group_info *db;
3853	int err, count = 0;
3854
3855	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3856		 entry->efd_count, entry->efd_group, entry);
3857
3858	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3859	/* we expect to find existing buddy because it's pinned */
3860	BUG_ON(err != 0);
3861
3862	spin_lock(&EXT4_SB(sb)->s_md_lock);
3863	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3864	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3865
3866	db = e4b.bd_info;
3867	/* there are blocks to put in buddy to make them really free */
3868	count += entry->efd_count;
3869	ext4_lock_group(sb, entry->efd_group);
3870	/* Take it out of per group rb tree */
3871	rb_erase(&entry->efd_node, &(db->bb_free_root));
3872	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3873
3874	/*
3875	 * Clear the trimmed flag for the group so that the next
3876	 * ext4_trim_fs can trim it.
3877	 * If the volume is mounted with -o discard, online discard
3878	 * is supported and the free blocks will be trimmed online.
3879	 */
3880	if (!test_opt(sb, DISCARD))
3881		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3882
3883	if (!db->bb_free_root.rb_node) {
3884		/* No more items in the per group rb tree
3885		 * balance refcounts from ext4_mb_free_metadata()
3886		 */
3887		put_page(e4b.bd_buddy_page);
3888		put_page(e4b.bd_bitmap_page);
3889	}
3890	ext4_unlock_group(sb, entry->efd_group);
3891	ext4_mb_unload_buddy(&e4b);
3892
3893	mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3894}
3895
3896/*
3897 * This function is called by the jbd2 layer once the commit has finished,
3898 * so we know we can free the blocks that were released with that commit.
3899 */
3900void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3901{
3902	struct ext4_sb_info *sbi = EXT4_SB(sb);
3903	struct ext4_free_data *entry, *tmp;
3904	LIST_HEAD(freed_data_list);
3905	struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
3906	bool wake;
3907
3908	list_replace_init(s_freed_head, &freed_data_list);
3909
3910	list_for_each_entry(entry, &freed_data_list, efd_list)
3911		ext4_free_data_in_buddy(sb, entry);
3912
3913	if (test_opt(sb, DISCARD)) {
3914		spin_lock(&sbi->s_md_lock);
3915		wake = list_empty(&sbi->s_discard_list);
3916		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3917		spin_unlock(&sbi->s_md_lock);
3918		if (wake)
3919			queue_work(system_unbound_wq, &sbi->s_discard_work);
3920	} else {
3921		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3922			kmem_cache_free(ext4_free_data_cachep, entry);
3923	}
3924}
3925
3926int __init ext4_init_mballoc(void)
3927{
3928	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3929					SLAB_RECLAIM_ACCOUNT);
3930	if (ext4_pspace_cachep == NULL)
3931		goto out;
3932
3933	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3934				    SLAB_RECLAIM_ACCOUNT);
3935	if (ext4_ac_cachep == NULL)
3936		goto out_pa_free;
3937
3938	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3939					   SLAB_RECLAIM_ACCOUNT);
3940	if (ext4_free_data_cachep == NULL)
3941		goto out_ac_free;
3942
3943	return 0;
3944
3945out_ac_free:
3946	kmem_cache_destroy(ext4_ac_cachep);
3947out_pa_free:
3948	kmem_cache_destroy(ext4_pspace_cachep);
3949out:
3950	return -ENOMEM;
3951}
3952
3953void ext4_exit_mballoc(void)
3954{
3955	/*
3956	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3957	 * before destroying the slab cache.
3958	 */
3959	rcu_barrier();
3960	kmem_cache_destroy(ext4_pspace_cachep);
3961	kmem_cache_destroy(ext4_ac_cachep);
3962	kmem_cache_destroy(ext4_free_data_cachep);
3963	ext4_groupinfo_destroy_slabs();
3964}
3965
3966#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
3967#define EXT4_MB_SYNC_UPDATE 0x0002
3968static int
3969ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
3970		     ext4_group_t group, ext4_grpblk_t blkoff,
3971		     ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
3972{
3973	struct ext4_sb_info *sbi = EXT4_SB(sb);
3974	struct buffer_head *bitmap_bh = NULL;
3975	struct ext4_group_desc *gdp;
3976	struct buffer_head *gdp_bh;
3977	int err;
3978	unsigned int i, already, changed = len;
3979
3980	KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
3981				   handle, sb, state, group, blkoff, len,
3982				   flags, ret_changed);
3983
3984	if (ret_changed)
3985		*ret_changed = 0;
3986	bitmap_bh = ext4_read_block_bitmap(sb, group);
3987	if (IS_ERR(bitmap_bh))
3988		return PTR_ERR(bitmap_bh);
3989
3990	if (handle) {
3991		BUFFER_TRACE(bitmap_bh, "getting write access");
3992		err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3993						    EXT4_JTR_NONE);
3994		if (err)
3995			goto out_err;
3996	}
3997
3998	err = -EIO;
3999	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
4000	if (!gdp)
4001		goto out_err;
4002
4003	if (handle) {
4004		BUFFER_TRACE(gdp_bh, "get_write_access");
4005		err = ext4_journal_get_write_access(handle, sb, gdp_bh,
4006						    EXT4_JTR_NONE);
4007		if (err)
4008			goto out_err;
4009	}
4010
4011	ext4_lock_group(sb, group);
4012	if (ext4_has_group_desc_csum(sb) &&
4013	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4014		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4015		ext4_free_group_clusters_set(sb, gdp,
4016			ext4_free_clusters_after_init(sb, group, gdp));
4017	}
4018
4019	if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
4020		already = 0;
4021		for (i = 0; i < len; i++)
4022			if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4023					state)
4024				already++;
4025		changed = len - already;
4026	}
4027
4028	if (state) {
4029		mb_set_bits(bitmap_bh->b_data, blkoff, len);
4030		ext4_free_group_clusters_set(sb, gdp,
4031			ext4_free_group_clusters(sb, gdp) - changed);
4032	} else {
4033		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
4034		ext4_free_group_clusters_set(sb, gdp,
4035			ext4_free_group_clusters(sb, gdp) + changed);
4036	}
4037
4038	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4039	ext4_group_desc_csum_set(sb, group, gdp);
4040	ext4_unlock_group(sb, group);
4041	if (ret_changed)
4042		*ret_changed = changed;
4043
4044	if (sbi->s_log_groups_per_flex) {
4045		ext4_group_t flex_group = ext4_flex_group(sbi, group);
4046		struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4047					   s_flex_groups, flex_group);
4048
4049		if (state)
4050			atomic64_sub(changed, &fg->free_clusters);
4051		else
4052			atomic64_add(changed, &fg->free_clusters);
4053	}
4054
4055	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4056	if (err)
4057		goto out_err;
4058	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4059	if (err)
4060		goto out_err;
4061
4062	if (flags & EXT4_MB_SYNC_UPDATE) {
4063		sync_dirty_buffer(bitmap_bh);
4064		sync_dirty_buffer(gdp_bh);
4065	}
4066
4067out_err:
4068	brelse(bitmap_bh);
4069	return err;
4070}
4071
4072/*
4073 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
4074 * Returns 0 if success or error code
4075 */
4076static noinline_for_stack int
4077ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4078				handle_t *handle, unsigned int reserv_clstrs)
4079{
4080	struct ext4_group_desc *gdp;
4081	struct ext4_sb_info *sbi;
4082	struct super_block *sb;
4083	ext4_fsblk_t block;
4084	int err, len;
4085	int flags = 0;
4086	ext4_grpblk_t changed;
4087
4088	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4089	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4090
4091	sb = ac->ac_sb;
4092	sbi = EXT4_SB(sb);
4093
4094	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
4095	if (!gdp)
4096		return -EIO;
4097	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4098			ext4_free_group_clusters(sb, gdp));
4099
4100	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4101	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4102	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4103		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4104			   "fs metadata", block, block+len);
4105		/* File system mounted not to panic on error
4106		 * Fix the bitmap and return EFSCORRUPTED
4107		 * We leak some of the blocks here.
4108		 */
4109		err = ext4_mb_mark_context(handle, sb, true,
4110					   ac->ac_b_ex.fe_group,
4111					   ac->ac_b_ex.fe_start,
4112					   ac->ac_b_ex.fe_len,
4113					   0, NULL);
4114		if (!err)
4115			err = -EFSCORRUPTED;
4116		return err;
4117	}
4118
4119#ifdef AGGRESSIVE_CHECK
4120	flags |= EXT4_MB_BITMAP_MARKED_CHECK;
4121#endif
4122	err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
4123				   ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
4124				   flags, &changed);
4125
4126	if (err && changed == 0)
4127		return err;
4128
4129#ifdef AGGRESSIVE_CHECK
4130	BUG_ON(changed != ac->ac_b_ex.fe_len);
4131#endif
4132	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4133	/*
4134	 * Now reduce the dirty block count also. Should not go negative
4135	 */
4136	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4137		/* release all the reserved blocks if non delalloc */
4138		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4139				   reserv_clstrs);
4140
4141	return err;
4142}
4143
4144/*
4145 * Idempotent helper for Ext4 fast commit replay path to set the state of
4146 * blocks in bitmaps and update counters.
4147 */
4148void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4149		     int len, bool state)
4150{
4151	struct ext4_sb_info *sbi = EXT4_SB(sb);
4152	ext4_group_t group;
4153	ext4_grpblk_t blkoff;
4154	int err = 0;
4155	unsigned int clen, thisgrp_len;
4156
4157	while (len > 0) {
4158		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4159
4160		/*
4161		 * Check to see if we are freeing blocks across a group
4162		 * boundary.
4163		 * In case of flex_bg, this can happen that (block, len) may
4164		 * span across more than one group. In that case we need to
4165		 * get the corresponding group metadata to work with.
4166		 * For this we have goto again loop.
4167		 */
4168		thisgrp_len = min_t(unsigned int, (unsigned int)len,
4169			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4170		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4171
4172		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4173			ext4_error(sb, "Marking blocks in system zone - "
4174				   "Block = %llu, len = %u",
4175				   block, thisgrp_len);
4176			break;
4177		}
4178
4179		err = ext4_mb_mark_context(NULL, sb, state,
4180					   group, blkoff, clen,
4181					   EXT4_MB_BITMAP_MARKED_CHECK |
4182					   EXT4_MB_SYNC_UPDATE,
4183					   NULL);
4184		if (err)
4185			break;
4186
4187		block += thisgrp_len;
4188		len -= thisgrp_len;
4189		BUG_ON(len < 0);
4190	}
4191}
4192
4193/*
4194 * here we normalize request for locality group
4195 * Group request are normalized to s_mb_group_prealloc, which goes to
4196 * s_strip if we set the same via mount option.
4197 * s_mb_group_prealloc can be configured via
4198 * /sys/fs/ext4/<partition>/mb_group_prealloc
4199 *
4200 * XXX: should we try to preallocate more than the group has now?
4201 */
4202static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4203{
4204	struct super_block *sb = ac->ac_sb;
4205	struct ext4_locality_group *lg = ac->ac_lg;
4206
4207	BUG_ON(lg == NULL);
4208	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4209	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4210}
4211
4212/*
4213 * This function returns the next element to look at during inode
4214 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
4215 * (ei->i_prealloc_lock)
4216 *
4217 * new_start	The start of the range we want to compare
4218 * cur_start	The existing start that we are comparing against
4219 * node	The node of the rb_tree
4220 */
4221static inline struct rb_node*
4222ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4223{
4224	if (new_start < cur_start)
4225		return node->rb_left;
4226	else
4227		return node->rb_right;
4228}
4229
4230static inline void
4231ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4232			  ext4_lblk_t start, loff_t end)
4233{
4234	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4235	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4236	struct ext4_prealloc_space *tmp_pa;
4237	ext4_lblk_t tmp_pa_start;
4238	loff_t tmp_pa_end;
4239	struct rb_node *iter;
4240
4241	read_lock(&ei->i_prealloc_lock);
4242	for (iter = ei->i_prealloc_node.rb_node; iter;
4243	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4244		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4245				  pa_node.inode_node);
4246		tmp_pa_start = tmp_pa->pa_lstart;
4247		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4248
4249		spin_lock(&tmp_pa->pa_lock);
4250		if (tmp_pa->pa_deleted == 0)
4251			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4252		spin_unlock(&tmp_pa->pa_lock);
4253	}
4254	read_unlock(&ei->i_prealloc_lock);
4255}
4256
4257/*
4258 * Given an allocation context "ac" and a range "start", "end", check
4259 * and adjust boundaries if the range overlaps with any of the existing
4260 * preallocatoins stored in the corresponding inode of the allocation context.
4261 *
4262 * Parameters:
4263 *	ac			allocation context
4264 *	start			start of the new range
4265 *	end			end of the new range
4266 */
4267static inline void
4268ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4269			  ext4_lblk_t *start, loff_t *end)
4270{
4271	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4272	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4273	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4274	struct rb_node *iter;
4275	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4276	loff_t new_end, tmp_pa_end, left_pa_end = -1;
4277
4278	new_start = *start;
4279	new_end = *end;
4280
4281	/*
4282	 * Adjust the normalized range so that it doesn't overlap with any
4283	 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
4284	 * so it doesn't change underneath us.
4285	 */
4286	read_lock(&ei->i_prealloc_lock);
4287
4288	/* Step 1: find any one immediate neighboring PA of the normalized range */
4289	for (iter = ei->i_prealloc_node.rb_node; iter;
4290	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4291					    tmp_pa_start, iter)) {
4292		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4293				  pa_node.inode_node);
4294		tmp_pa_start = tmp_pa->pa_lstart;
4295		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4296
4297		/* PA must not overlap original request */
4298		spin_lock(&tmp_pa->pa_lock);
4299		if (tmp_pa->pa_deleted == 0)
4300			BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4301				 ac->ac_o_ex.fe_logical < tmp_pa_start));
4302		spin_unlock(&tmp_pa->pa_lock);
4303	}
4304
4305	/*
4306	 * Step 2: check if the found PA is left or right neighbor and
4307	 * get the other neighbor
4308	 */
4309	if (tmp_pa) {
4310		if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4311			struct rb_node *tmp;
4312
4313			left_pa = tmp_pa;
4314			tmp = rb_next(&left_pa->pa_node.inode_node);
4315			if (tmp) {
4316				right_pa = rb_entry(tmp,
4317						    struct ext4_prealloc_space,
4318						    pa_node.inode_node);
4319			}
4320		} else {
4321			struct rb_node *tmp;
4322
4323			right_pa = tmp_pa;
4324			tmp = rb_prev(&right_pa->pa_node.inode_node);
4325			if (tmp) {
4326				left_pa = rb_entry(tmp,
4327						   struct ext4_prealloc_space,
4328						   pa_node.inode_node);
4329			}
4330		}
4331	}
4332
4333	/* Step 3: get the non deleted neighbors */
4334	if (left_pa) {
4335		for (iter = &left_pa->pa_node.inode_node;;
4336		     iter = rb_prev(iter)) {
4337			if (!iter) {
4338				left_pa = NULL;
4339				break;
4340			}
4341
4342			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4343					  pa_node.inode_node);
4344			left_pa = tmp_pa;
4345			spin_lock(&tmp_pa->pa_lock);
4346			if (tmp_pa->pa_deleted == 0) {
4347				spin_unlock(&tmp_pa->pa_lock);
4348				break;
4349			}
4350			spin_unlock(&tmp_pa->pa_lock);
4351		}
4352	}
4353
4354	if (right_pa) {
4355		for (iter = &right_pa->pa_node.inode_node;;
4356		     iter = rb_next(iter)) {
4357			if (!iter) {
4358				right_pa = NULL;
4359				break;
4360			}
4361
4362			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4363					  pa_node.inode_node);
4364			right_pa = tmp_pa;
4365			spin_lock(&tmp_pa->pa_lock);
4366			if (tmp_pa->pa_deleted == 0) {
4367				spin_unlock(&tmp_pa->pa_lock);
4368				break;
4369			}
4370			spin_unlock(&tmp_pa->pa_lock);
4371		}
4372	}
4373
4374	if (left_pa) {
4375		left_pa_end = pa_logical_end(sbi, left_pa);
4376		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4377	}
4378
4379	if (right_pa) {
4380		right_pa_start = right_pa->pa_lstart;
4381		BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4382	}
4383
4384	/* Step 4: trim our normalized range to not overlap with the neighbors */
4385	if (left_pa) {
4386		if (left_pa_end > new_start)
4387			new_start = left_pa_end;
4388	}
4389
4390	if (right_pa) {
4391		if (right_pa_start < new_end)
4392			new_end = right_pa_start;
4393	}
4394	read_unlock(&ei->i_prealloc_lock);
4395
4396	/* XXX: extra loop to check we really don't overlap preallocations */
4397	ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4398
4399	*start = new_start;
4400	*end = new_end;
4401}
4402
4403/*
4404 * Normalization means making request better in terms of
4405 * size and alignment
4406 */
4407static noinline_for_stack void
4408ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4409				struct ext4_allocation_request *ar)
4410{
4411	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4412	struct ext4_super_block *es = sbi->s_es;
4413	int bsbits, max;
4414	loff_t size, start_off, end;
4415	loff_t orig_size __maybe_unused;
4416	ext4_lblk_t start;
4417
4418	/* do normalize only data requests, metadata requests
4419	   do not need preallocation */
4420	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4421		return;
4422
4423	/* sometime caller may want exact blocks */
4424	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4425		return;
4426
4427	/* caller may indicate that preallocation isn't
4428	 * required (it's a tail, for example) */
4429	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4430		return;
4431
4432	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4433		ext4_mb_normalize_group_request(ac);
4434		return ;
4435	}
4436
4437	bsbits = ac->ac_sb->s_blocksize_bits;
4438
4439	/* first, let's learn actual file size
4440	 * given current request is allocated */
4441	size = extent_logical_end(sbi, &ac->ac_o_ex);
4442	size = size << bsbits;
4443	if (size < i_size_read(ac->ac_inode))
4444		size = i_size_read(ac->ac_inode);
4445	orig_size = size;
4446
4447	/* max size of free chunks */
4448	max = 2 << bsbits;
4449
4450#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
4451		(req <= (size) || max <= (chunk_size))
4452
4453	/* first, try to predict filesize */
4454	/* XXX: should this table be tunable? */
4455	start_off = 0;
4456	if (size <= 16 * 1024) {
4457		size = 16 * 1024;
4458	} else if (size <= 32 * 1024) {
4459		size = 32 * 1024;
4460	} else if (size <= 64 * 1024) {
4461		size = 64 * 1024;
4462	} else if (size <= 128 * 1024) {
4463		size = 128 * 1024;
4464	} else if (size <= 256 * 1024) {
4465		size = 256 * 1024;
4466	} else if (size <= 512 * 1024) {
4467		size = 512 * 1024;
4468	} else if (size <= 1024 * 1024) {
4469		size = 1024 * 1024;
4470	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4471		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4472						(21 - bsbits)) << 21;
4473		size = 2 * 1024 * 1024;
4474	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4475		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4476							(22 - bsbits)) << 22;
4477		size = 4 * 1024 * 1024;
4478	} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4479					(8<<20)>>bsbits, max, 8 * 1024)) {
4480		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4481							(23 - bsbits)) << 23;
4482		size = 8 * 1024 * 1024;
4483	} else {
4484		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4485		size	  = (loff_t) EXT4_C2B(sbi,
4486					      ac->ac_o_ex.fe_len) << bsbits;
4487	}
4488	size = size >> bsbits;
4489	start = start_off >> bsbits;
4490
4491	/*
4492	 * For tiny groups (smaller than 8MB) the chosen allocation
4493	 * alignment may be larger than group size. Make sure the
4494	 * alignment does not move allocation to a different group which
4495	 * makes mballoc fail assertions later.
4496	 */
4497	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4498			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4499
4500	/* avoid unnecessary preallocation that may trigger assertions */
4501	if (start + size > EXT_MAX_BLOCKS)
4502		size = EXT_MAX_BLOCKS - start;
4503
4504	/* don't cover already allocated blocks in selected range */
4505	if (ar->pleft && start <= ar->lleft) {
4506		size -= ar->lleft + 1 - start;
4507		start = ar->lleft + 1;
4508	}
4509	if (ar->pright && start + size - 1 >= ar->lright)
4510		size -= start + size - ar->lright;
4511
4512	/*
4513	 * Trim allocation request for filesystems with artificially small
4514	 * groups.
4515	 */
4516	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4517		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4518
4519	end = start + size;
4520
4521	ext4_mb_pa_adjust_overlap(ac, &start, &end);
4522
4523	size = end - start;
4524
4525	/*
4526	 * In this function "start" and "size" are normalized for better
4527	 * alignment and length such that we could preallocate more blocks.
4528	 * This normalization is done such that original request of
4529	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4530	 * "size" boundaries.
4531	 * (Note fe_len can be relaxed since FS block allocation API does not
4532	 * provide gurantee on number of contiguous blocks allocation since that
4533	 * depends upon free space left, etc).
4534	 * In case of inode pa, later we use the allocated blocks
4535	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4536	 * range of goal/best blocks [start, size] to put it at the
4537	 * ac_o_ex.fe_logical extent of this inode.
4538	 * (See ext4_mb_use_inode_pa() for more details)
4539	 */
4540	if (start + size <= ac->ac_o_ex.fe_logical ||
4541			start > ac->ac_o_ex.fe_logical) {
4542		ext4_msg(ac->ac_sb, KERN_ERR,
4543			 "start %lu, size %lu, fe_logical %lu",
4544			 (unsigned long) start, (unsigned long) size,
4545			 (unsigned long) ac->ac_o_ex.fe_logical);
4546		BUG();
4547	}
4548	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4549
4550	/* now prepare goal request */
4551
4552	/* XXX: is it better to align blocks WRT to logical
4553	 * placement or satisfy big request as is */
4554	ac->ac_g_ex.fe_logical = start;
4555	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4556	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4557
4558	/* define goal start in order to merge */
4559	if (ar->pright && (ar->lright == (start + size)) &&
4560	    ar->pright >= size &&
4561	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4562		/* merge to the right */
4563		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4564						&ac->ac_g_ex.fe_group,
4565						&ac->ac_g_ex.fe_start);
4566		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4567	}
4568	if (ar->pleft && (ar->lleft + 1 == start) &&
4569	    ar->pleft + 1 < ext4_blocks_count(es)) {
4570		/* merge to the left */
4571		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4572						&ac->ac_g_ex.fe_group,
4573						&ac->ac_g_ex.fe_start);
4574		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4575	}
4576
4577	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4578		 orig_size, start);
4579}
4580
4581static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4582{
4583	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4584
4585	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4586		atomic_inc(&sbi->s_bal_reqs);
4587		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4588		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4589			atomic_inc(&sbi->s_bal_success);
4590
4591		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4592		for (int i=0; i<EXT4_MB_NUM_CRS; i++) {
4593			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4594		}
4595
4596		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4597		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4598				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4599			atomic_inc(&sbi->s_bal_goals);
4600		/* did we allocate as much as normalizer originally wanted? */
4601		if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4602			atomic_inc(&sbi->s_bal_len_goals);
4603
4604		if (ac->ac_found > sbi->s_mb_max_to_scan)
4605			atomic_inc(&sbi->s_bal_breaks);
4606	}
4607
4608	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4609		trace_ext4_mballoc_alloc(ac);
4610	else
4611		trace_ext4_mballoc_prealloc(ac);
4612}
4613
4614/*
4615 * Called on failure; free up any blocks from the inode PA for this
4616 * context.  We don't need this for MB_GROUP_PA because we only change
4617 * pa_free in ext4_mb_release_context(), but on failure, we've already
4618 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4619 */
4620static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4621{
4622	struct ext4_prealloc_space *pa = ac->ac_pa;
4623	struct ext4_buddy e4b;
4624	int err;
4625
4626	if (pa == NULL) {
4627		if (ac->ac_f_ex.fe_len == 0)
4628			return;
4629		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4630		if (WARN_RATELIMIT(err,
4631				   "ext4: mb_load_buddy failed (%d)", err))
4632			/*
4633			 * This should never happen since we pin the
4634			 * pages in the ext4_allocation_context so
4635			 * ext4_mb_load_buddy() should never fail.
4636			 */
4637			return;
4638		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4639		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4640			       ac->ac_f_ex.fe_len);
4641		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4642		ext4_mb_unload_buddy(&e4b);
4643		return;
4644	}
4645	if (pa->pa_type == MB_INODE_PA) {
4646		spin_lock(&pa->pa_lock);
4647		pa->pa_free += ac->ac_b_ex.fe_len;
4648		spin_unlock(&pa->pa_lock);
4649	}
4650}
4651
4652/*
4653 * use blocks preallocated to inode
4654 */
4655static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4656				struct ext4_prealloc_space *pa)
4657{
4658	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4659	ext4_fsblk_t start;
4660	ext4_fsblk_t end;
4661	int len;
4662
4663	/* found preallocated blocks, use them */
4664	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4665	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4666		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4667	len = EXT4_NUM_B2C(sbi, end - start);
4668	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4669					&ac->ac_b_ex.fe_start);
4670	ac->ac_b_ex.fe_len = len;
4671	ac->ac_status = AC_STATUS_FOUND;
4672	ac->ac_pa = pa;
4673
4674	BUG_ON(start < pa->pa_pstart);
4675	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4676	BUG_ON(pa->pa_free < len);
4677	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4678	pa->pa_free -= len;
4679
4680	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4681}
4682
4683/*
4684 * use blocks preallocated to locality group
4685 */
4686static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4687				struct ext4_prealloc_space *pa)
4688{
4689	unsigned int len = ac->ac_o_ex.fe_len;
4690
4691	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4692					&ac->ac_b_ex.fe_group,
4693					&ac->ac_b_ex.fe_start);
4694	ac->ac_b_ex.fe_len = len;
4695	ac->ac_status = AC_STATUS_FOUND;
4696	ac->ac_pa = pa;
4697
4698	/* we don't correct pa_pstart or pa_len here to avoid
4699	 * possible race when the group is being loaded concurrently
4700	 * instead we correct pa later, after blocks are marked
4701	 * in on-disk bitmap -- see ext4_mb_release_context()
4702	 * Other CPUs are prevented from allocating from this pa by lg_mutex
4703	 */
4704	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4705		 pa->pa_lstart, len, pa);
4706}
4707
4708/*
4709 * Return the prealloc space that have minimal distance
4710 * from the goal block. @cpa is the prealloc
4711 * space that is having currently known minimal distance
4712 * from the goal block.
4713 */
4714static struct ext4_prealloc_space *
4715ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4716			struct ext4_prealloc_space *pa,
4717			struct ext4_prealloc_space *cpa)
4718{
4719	ext4_fsblk_t cur_distance, new_distance;
4720
4721	if (cpa == NULL) {
4722		atomic_inc(&pa->pa_count);
4723		return pa;
4724	}
4725	cur_distance = abs(goal_block - cpa->pa_pstart);
4726	new_distance = abs(goal_block - pa->pa_pstart);
4727
4728	if (cur_distance <= new_distance)
4729		return cpa;
4730
4731	/* drop the previous reference */
4732	atomic_dec(&cpa->pa_count);
4733	atomic_inc(&pa->pa_count);
4734	return pa;
4735}
4736
4737/*
4738 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4739 */
4740static bool
4741ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4742		      struct ext4_prealloc_space *pa)
4743{
4744	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4745	ext4_fsblk_t start;
4746
4747	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4748		return true;
4749
4750	/*
4751	 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4752	 * in ext4_mb_normalize_request and will keep same with ac_o_ex
4753	 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4754	 * consistent with ext4_mb_find_by_goal.
4755	 */
4756	start = pa->pa_pstart +
4757		(ac->ac_g_ex.fe_logical - pa->pa_lstart);
4758	if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4759		return false;
4760
4761	if (ac->ac_g_ex.fe_len > pa->pa_len -
4762	    EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4763		return false;
4764
4765	return true;
4766}
4767
4768/*
4769 * search goal blocks in preallocated space
4770 */
4771static noinline_for_stack bool
4772ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4773{
4774	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4775	int order, i;
4776	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4777	struct ext4_locality_group *lg;
4778	struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4779	struct rb_node *iter;
4780	ext4_fsblk_t goal_block;
4781
4782	/* only data can be preallocated */
4783	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4784		return false;
4785
4786	/*
4787	 * first, try per-file preallocation by searching the inode pa rbtree.
4788	 *
4789	 * Here, we can't do a direct traversal of the tree because
4790	 * ext4_mb_discard_group_preallocation() can paralelly mark the pa
4791	 * deleted and that can cause direct traversal to skip some entries.
4792	 */
4793	read_lock(&ei->i_prealloc_lock);
4794
4795	if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4796		goto try_group_pa;
4797	}
4798
4799	/*
4800	 * Step 1: Find a pa with logical start immediately adjacent to the
4801	 * original logical start. This could be on the left or right.
4802	 *
4803	 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4804	 */
4805	for (iter = ei->i_prealloc_node.rb_node; iter;
4806	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4807					    tmp_pa->pa_lstart, iter)) {
4808		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4809				  pa_node.inode_node);
4810	}
4811
4812	/*
4813	 * Step 2: The adjacent pa might be to the right of logical start, find
4814	 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4815	 * logical start is towards the left of original request's logical start
4816	 */
4817	if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4818		struct rb_node *tmp;
4819		tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4820
4821		if (tmp) {
4822			tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4823					    pa_node.inode_node);
4824		} else {
4825			/*
4826			 * If there is no adjacent pa to the left then finding
4827			 * an overlapping pa is not possible hence stop searching
4828			 * inode pa tree
4829			 */
4830			goto try_group_pa;
4831		}
4832	}
4833
4834	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4835
4836	/*
4837	 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4838	 * the first non deleted adjacent pa. After this step we should have a
4839	 * valid tmp_pa which is guaranteed to be non deleted.
4840	 */
4841	for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4842		if (!iter) {
4843			/*
4844			 * no non deleted left adjacent pa, so stop searching
4845			 * inode pa tree
4846			 */
4847			goto try_group_pa;
4848		}
4849		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4850				  pa_node.inode_node);
4851		spin_lock(&tmp_pa->pa_lock);
4852		if (tmp_pa->pa_deleted == 0) {
4853			/*
4854			 * We will keep holding the pa_lock from
4855			 * this point on because we don't want group discard
4856			 * to delete this pa underneath us. Since group
4857			 * discard is anyways an ENOSPC operation it
4858			 * should be okay for it to wait a few more cycles.
4859			 */
4860			break;
4861		} else {
4862			spin_unlock(&tmp_pa->pa_lock);
4863		}
4864	}
4865
4866	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4867	BUG_ON(tmp_pa->pa_deleted == 1);
4868
4869	/*
4870	 * Step 4: We now have the non deleted left adjacent pa. Only this
4871	 * pa can possibly satisfy the request hence check if it overlaps
4872	 * original logical start and stop searching if it doesn't.
4873	 */
4874	if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4875		spin_unlock(&tmp_pa->pa_lock);
4876		goto try_group_pa;
4877	}
4878
4879	/* non-extent files can't have physical blocks past 2^32 */
4880	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4881	    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4882	     EXT4_MAX_BLOCK_FILE_PHYS)) {
4883		/*
4884		 * Since PAs don't overlap, we won't find any other PA to
4885		 * satisfy this.
4886		 */
4887		spin_unlock(&tmp_pa->pa_lock);
4888		goto try_group_pa;
4889	}
4890
4891	if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4892		atomic_inc(&tmp_pa->pa_count);
4893		ext4_mb_use_inode_pa(ac, tmp_pa);
4894		spin_unlock(&tmp_pa->pa_lock);
4895		read_unlock(&ei->i_prealloc_lock);
4896		return true;
4897	} else {
4898		/*
4899		 * We found a valid overlapping pa but couldn't use it because
4900		 * it had no free blocks. This should ideally never happen
4901		 * because:
4902		 *
4903		 * 1. When a new inode pa is added to rbtree it must have
4904		 *    pa_free > 0 since otherwise we won't actually need
4905		 *    preallocation.
4906		 *
4907		 * 2. An inode pa that is in the rbtree can only have it's
4908		 *    pa_free become zero when another thread calls:
4909		 *      ext4_mb_new_blocks
4910		 *       ext4_mb_use_preallocated
4911		 *        ext4_mb_use_inode_pa
4912		 *
4913		 * 3. Further, after the above calls make pa_free == 0, we will
4914		 *    immediately remove it from the rbtree in:
4915		 *      ext4_mb_new_blocks
4916		 *       ext4_mb_release_context
4917		 *        ext4_mb_put_pa
4918		 *
4919		 * 4. Since the pa_free becoming 0 and pa_free getting removed
4920		 * from tree both happen in ext4_mb_new_blocks, which is always
4921		 * called with i_data_sem held for data allocations, we can be
4922		 * sure that another process will never see a pa in rbtree with
4923		 * pa_free == 0.
4924		 */
4925		WARN_ON_ONCE(tmp_pa->pa_free == 0);
4926	}
4927	spin_unlock(&tmp_pa->pa_lock);
4928try_group_pa:
4929	read_unlock(&ei->i_prealloc_lock);
4930
4931	/* can we use group allocation? */
4932	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4933		return false;
4934
4935	/* inode may have no locality group for some reason */
4936	lg = ac->ac_lg;
4937	if (lg == NULL)
4938		return false;
4939	order  = fls(ac->ac_o_ex.fe_len) - 1;
4940	if (order > PREALLOC_TB_SIZE - 1)
4941		/* The max size of hash table is PREALLOC_TB_SIZE */
4942		order = PREALLOC_TB_SIZE - 1;
4943
4944	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4945	/*
4946	 * search for the prealloc space that is having
4947	 * minimal distance from the goal block.
4948	 */
4949	for (i = order; i < PREALLOC_TB_SIZE; i++) {
4950		rcu_read_lock();
4951		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4952					pa_node.lg_list) {
4953			spin_lock(&tmp_pa->pa_lock);
4954			if (tmp_pa->pa_deleted == 0 &&
4955					tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4956
4957				cpa = ext4_mb_check_group_pa(goal_block,
4958								tmp_pa, cpa);
4959			}
4960			spin_unlock(&tmp_pa->pa_lock);
4961		}
4962		rcu_read_unlock();
4963	}
4964	if (cpa) {
4965		ext4_mb_use_group_pa(ac, cpa);
4966		return true;
4967	}
4968	return false;
4969}
4970
4971/*
4972 * the function goes through all preallocation in this group and marks them
4973 * used in in-core bitmap. buddy must be generated from this bitmap
4974 * Need to be called with ext4 group lock held
4975 */
4976static noinline_for_stack
4977void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4978					ext4_group_t group)
4979{
4980	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4981	struct ext4_prealloc_space *pa;
4982	struct list_head *cur;
4983	ext4_group_t groupnr;
4984	ext4_grpblk_t start;
4985	int preallocated = 0;
4986	int len;
4987
4988	if (!grp)
4989		return;
4990
4991	/* all form of preallocation discards first load group,
4992	 * so the only competing code is preallocation use.
4993	 * we don't need any locking here
4994	 * notice we do NOT ignore preallocations with pa_deleted
4995	 * otherwise we could leave used blocks available for
4996	 * allocation in buddy when concurrent ext4_mb_put_pa()
4997	 * is dropping preallocation
4998	 */
4999	list_for_each(cur, &grp->bb_prealloc_list) {
5000		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5001		spin_lock(&pa->pa_lock);
5002		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5003					     &groupnr, &start);
5004		len = pa->pa_len;
5005		spin_unlock(&pa->pa_lock);
5006		if (unlikely(len == 0))
5007			continue;
5008		BUG_ON(groupnr != group);
5009		mb_set_bits(bitmap, start, len);
5010		preallocated += len;
5011	}
5012	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
5013}
5014
5015static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5016				    struct ext4_prealloc_space *pa)
5017{
5018	struct ext4_inode_info *ei;
5019
5020	if (pa->pa_deleted) {
5021		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5022			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5023			     pa->pa_len);
5024		return;
5025	}
5026
5027	pa->pa_deleted = 1;
5028
5029	if (pa->pa_type == MB_INODE_PA) {
5030		ei = EXT4_I(pa->pa_inode);
5031		atomic_dec(&ei->i_prealloc_active);
5032	}
5033}
5034
5035static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5036{
5037	BUG_ON(!pa);
5038	BUG_ON(atomic_read(&pa->pa_count));
5039	BUG_ON(pa->pa_deleted == 0);
5040	kmem_cache_free(ext4_pspace_cachep, pa);
5041}
5042
5043static void ext4_mb_pa_callback(struct rcu_head *head)
5044{
5045	struct ext4_prealloc_space *pa;
5046
5047	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5048	ext4_mb_pa_free(pa);
5049}
5050
5051/*
5052 * drops a reference to preallocated space descriptor
5053 * if this was the last reference and the space is consumed
5054 */
5055static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5056			struct super_block *sb, struct ext4_prealloc_space *pa)
5057{
5058	ext4_group_t grp;
5059	ext4_fsblk_t grp_blk;
5060	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5061
5062	/* in this short window concurrent discard can set pa_deleted */
5063	spin_lock(&pa->pa_lock);
5064	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5065		spin_unlock(&pa->pa_lock);
5066		return;
5067	}
5068
5069	if (pa->pa_deleted == 1) {
5070		spin_unlock(&pa->pa_lock);
5071		return;
5072	}
5073
5074	ext4_mb_mark_pa_deleted(sb, pa);
5075	spin_unlock(&pa->pa_lock);
5076
5077	grp_blk = pa->pa_pstart;
5078	/*
5079	 * If doing group-based preallocation, pa_pstart may be in the
5080	 * next group when pa is used up
5081	 */
5082	if (pa->pa_type == MB_GROUP_PA)
5083		grp_blk--;
5084
5085	grp = ext4_get_group_number(sb, grp_blk);
5086
5087	/*
5088	 * possible race:
5089	 *
5090	 *  P1 (buddy init)			P2 (regular allocation)
5091	 *					find block B in PA
5092	 *  copy on-disk bitmap to buddy
5093	 *  					mark B in on-disk bitmap
5094	 *					drop PA from group
5095	 *  mark all PAs in buddy
5096	 *
5097	 * thus, P1 initializes buddy with B available. to prevent this
5098	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5099	 * against that pair
5100	 */
5101	ext4_lock_group(sb, grp);
5102	list_del(&pa->pa_group_list);
5103	ext4_unlock_group(sb, grp);
5104
5105	if (pa->pa_type == MB_INODE_PA) {
5106		write_lock(pa->pa_node_lock.inode_lock);
5107		rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5108		write_unlock(pa->pa_node_lock.inode_lock);
5109		ext4_mb_pa_free(pa);
5110	} else {
5111		spin_lock(pa->pa_node_lock.lg_lock);
5112		list_del_rcu(&pa->pa_node.lg_list);
5113		spin_unlock(pa->pa_node_lock.lg_lock);
5114		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5115	}
5116}
5117
5118static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5119{
5120	struct rb_node **iter = &root->rb_node, *parent = NULL;
5121	struct ext4_prealloc_space *iter_pa, *new_pa;
5122	ext4_lblk_t iter_start, new_start;
5123
5124	while (*iter) {
5125		iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5126				   pa_node.inode_node);
5127		new_pa = rb_entry(new, struct ext4_prealloc_space,
5128				   pa_node.inode_node);
5129		iter_start = iter_pa->pa_lstart;
5130		new_start = new_pa->pa_lstart;
5131
5132		parent = *iter;
5133		if (new_start < iter_start)
5134			iter = &((*iter)->rb_left);
5135		else
5136			iter = &((*iter)->rb_right);
5137	}
5138
5139	rb_link_node(new, parent, iter);
5140	rb_insert_color(new, root);
5141}
5142
5143/*
5144 * creates new preallocated space for given inode
5145 */
5146static noinline_for_stack void
5147ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5148{
5149	struct super_block *sb = ac->ac_sb;
5150	struct ext4_sb_info *sbi = EXT4_SB(sb);
5151	struct ext4_prealloc_space *pa;
5152	struct ext4_group_info *grp;
5153	struct ext4_inode_info *ei;
5154
5155	/* preallocate only when found space is larger then requested */
5156	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5157	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5158	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5159	BUG_ON(ac->ac_pa == NULL);
5160
5161	pa = ac->ac_pa;
5162
5163	if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5164		struct ext4_free_extent ex = {
5165			.fe_logical = ac->ac_g_ex.fe_logical,
5166			.fe_len = ac->ac_orig_goal_len,
5167		};
5168		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5169		loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
5170
5171		/*
5172		 * We can't allocate as much as normalizer wants, so we try
5173		 * to get proper lstart to cover the original request, except
5174		 * when the goal doesn't cover the original request as below:
5175		 *
5176		 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5177		 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5178		 */
5179		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5180		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5181
5182		/*
5183		 * Use the below logic for adjusting best extent as it keeps
5184		 * fragmentation in check while ensuring logical range of best
5185		 * extent doesn't overflow out of goal extent:
5186		 *
5187		 * 1. Check if best ex can be kept at end of goal (before
5188		 *    cr_best_avail trimmed it) and still cover original start
5189		 * 2. Else, check if best ex can be kept at start of goal and
5190		 *    still cover original end
5191		 * 3. Else, keep the best ex at start of original request.
5192		 */
5193		ex.fe_len = ac->ac_b_ex.fe_len;
5194
5195		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5196		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5197			goto adjust_bex;
5198
5199		ex.fe_logical = ac->ac_g_ex.fe_logical;
5200		if (o_ex_end <= extent_logical_end(sbi, &ex))
5201			goto adjust_bex;
5202
5203		ex.fe_logical = ac->ac_o_ex.fe_logical;
5204adjust_bex:
5205		ac->ac_b_ex.fe_logical = ex.fe_logical;
5206
5207		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5208		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5209	}
5210
5211	pa->pa_lstart = ac->ac_b_ex.fe_logical;
5212	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5213	pa->pa_len = ac->ac_b_ex.fe_len;
5214	pa->pa_free = pa->pa_len;
5215	spin_lock_init(&pa->pa_lock);
5216	INIT_LIST_HEAD(&pa->pa_group_list);
5217	pa->pa_deleted = 0;
5218	pa->pa_type = MB_INODE_PA;
5219
5220	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5221		 pa->pa_len, pa->pa_lstart);
5222	trace_ext4_mb_new_inode_pa(ac, pa);
5223
5224	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5225	ext4_mb_use_inode_pa(ac, pa);
5226
5227	ei = EXT4_I(ac->ac_inode);
5228	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5229	if (!grp)
5230		return;
5231
5232	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5233	pa->pa_inode = ac->ac_inode;
5234
5235	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5236
5237	write_lock(pa->pa_node_lock.inode_lock);
5238	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5239	write_unlock(pa->pa_node_lock.inode_lock);
5240	atomic_inc(&ei->i_prealloc_active);
5241}
5242
5243/*
5244 * creates new preallocated space for locality group inodes belongs to
5245 */
5246static noinline_for_stack void
5247ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5248{
5249	struct super_block *sb = ac->ac_sb;
5250	struct ext4_locality_group *lg;
5251	struct ext4_prealloc_space *pa;
5252	struct ext4_group_info *grp;
5253
5254	/* preallocate only when found space is larger then requested */
5255	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5256	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5257	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5258	BUG_ON(ac->ac_pa == NULL);
5259
5260	pa = ac->ac_pa;
5261
5262	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5263	pa->pa_lstart = pa->pa_pstart;
5264	pa->pa_len = ac->ac_b_ex.fe_len;
5265	pa->pa_free = pa->pa_len;
5266	spin_lock_init(&pa->pa_lock);
5267	INIT_LIST_HEAD(&pa->pa_node.lg_list);
5268	INIT_LIST_HEAD(&pa->pa_group_list);
5269	pa->pa_deleted = 0;
5270	pa->pa_type = MB_GROUP_PA;
5271
5272	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5273		 pa->pa_len, pa->pa_lstart);
5274	trace_ext4_mb_new_group_pa(ac, pa);
5275
5276	ext4_mb_use_group_pa(ac, pa);
5277	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5278
5279	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5280	if (!grp)
5281		return;
5282	lg = ac->ac_lg;
5283	BUG_ON(lg == NULL);
5284
5285	pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5286	pa->pa_inode = NULL;
5287
5288	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5289
5290	/*
5291	 * We will later add the new pa to the right bucket
5292	 * after updating the pa_free in ext4_mb_release_context
5293	 */
5294}
5295
5296static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5297{
5298	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5299		ext4_mb_new_group_pa(ac);
5300	else
5301		ext4_mb_new_inode_pa(ac);
5302}
5303
5304/*
5305 * finds all unused blocks in on-disk bitmap, frees them in
5306 * in-core bitmap and buddy.
5307 * @pa must be unlinked from inode and group lists, so that
5308 * nobody else can find/use it.
5309 * the caller MUST hold group/inode locks.
5310 * TODO: optimize the case when there are no in-core structures yet
5311 */
5312static noinline_for_stack void
5313ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5314			struct ext4_prealloc_space *pa)
5315{
5316	struct super_block *sb = e4b->bd_sb;
5317	struct ext4_sb_info *sbi = EXT4_SB(sb);
5318	unsigned int end;
5319	unsigned int next;
5320	ext4_group_t group;
5321	ext4_grpblk_t bit;
5322	unsigned long long grp_blk_start;
5323	int free = 0;
5324
5325	BUG_ON(pa->pa_deleted == 0);
5326	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5327	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5328	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5329	end = bit + pa->pa_len;
5330
5331	while (bit < end) {
5332		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5333		if (bit >= end)
5334			break;
5335		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5336		mb_debug(sb, "free preallocated %u/%u in group %u\n",
5337			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5338			 (unsigned) next - bit, (unsigned) group);
5339		free += next - bit;
5340
5341		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5342		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5343						    EXT4_C2B(sbi, bit)),
5344					       next - bit);
5345		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5346		bit = next + 1;
5347	}
5348	if (free != pa->pa_free) {
5349		ext4_msg(e4b->bd_sb, KERN_CRIT,
5350			 "pa %p: logic %lu, phys. %lu, len %d",
5351			 pa, (unsigned long) pa->pa_lstart,
5352			 (unsigned long) pa->pa_pstart,
5353			 pa->pa_len);
5354		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5355					free, pa->pa_free);
5356		/*
5357		 * pa is already deleted so we use the value obtained
5358		 * from the bitmap and continue.
5359		 */
5360	}
5361	atomic_add(free, &sbi->s_mb_discarded);
5362}
5363
5364static noinline_for_stack void
5365ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5366				struct ext4_prealloc_space *pa)
5367{
5368	struct super_block *sb = e4b->bd_sb;
5369	ext4_group_t group;
5370	ext4_grpblk_t bit;
5371
5372	trace_ext4_mb_release_group_pa(sb, pa);
5373	BUG_ON(pa->pa_deleted == 0);
5374	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5375	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5376		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5377			     e4b->bd_group, group, pa->pa_pstart);
5378		return;
5379	}
5380	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5381	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5382	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5383}
5384
5385/*
5386 * releases all preallocations in given group
5387 *
5388 * first, we need to decide discard policy:
5389 * - when do we discard
5390 *   1) ENOSPC
5391 * - how many do we discard
5392 *   1) how many requested
5393 */
5394static noinline_for_stack int
5395ext4_mb_discard_group_preallocations(struct super_block *sb,
5396				     ext4_group_t group, int *busy)
5397{
5398	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5399	struct buffer_head *bitmap_bh = NULL;
5400	struct ext4_prealloc_space *pa, *tmp;
5401	LIST_HEAD(list);
5402	struct ext4_buddy e4b;
5403	struct ext4_inode_info *ei;
5404	int err;
5405	int free = 0;
5406
5407	if (!grp)
5408		return 0;
5409	mb_debug(sb, "discard preallocation for group %u\n", group);
5410	if (list_empty(&grp->bb_prealloc_list))
5411		goto out_dbg;
5412
5413	bitmap_bh = ext4_read_block_bitmap(sb, group);
5414	if (IS_ERR(bitmap_bh)) {
5415		err = PTR_ERR(bitmap_bh);
5416		ext4_error_err(sb, -err,
5417			       "Error %d reading block bitmap for %u",
5418			       err, group);
5419		goto out_dbg;
5420	}
5421
5422	err = ext4_mb_load_buddy(sb, group, &e4b);
5423	if (err) {
5424		ext4_warning(sb, "Error %d loading buddy information for %u",
5425			     err, group);
5426		put_bh(bitmap_bh);
5427		goto out_dbg;
5428	}
5429
5430	ext4_lock_group(sb, group);
5431	list_for_each_entry_safe(pa, tmp,
5432				&grp->bb_prealloc_list, pa_group_list) {
5433		spin_lock(&pa->pa_lock);
5434		if (atomic_read(&pa->pa_count)) {
5435			spin_unlock(&pa->pa_lock);
5436			*busy = 1;
5437			continue;
5438		}
5439		if (pa->pa_deleted) {
5440			spin_unlock(&pa->pa_lock);
5441			continue;
5442		}
5443
5444		/* seems this one can be freed ... */
5445		ext4_mb_mark_pa_deleted(sb, pa);
5446
5447		if (!free)
5448			this_cpu_inc(discard_pa_seq);
5449
5450		/* we can trust pa_free ... */
5451		free += pa->pa_free;
5452
5453		spin_unlock(&pa->pa_lock);
5454
5455		list_del(&pa->pa_group_list);
5456		list_add(&pa->u.pa_tmp_list, &list);
5457	}
5458
5459	/* now free all selected PAs */
5460	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5461
5462		/* remove from object (inode or locality group) */
5463		if (pa->pa_type == MB_GROUP_PA) {
5464			spin_lock(pa->pa_node_lock.lg_lock);
5465			list_del_rcu(&pa->pa_node.lg_list);
5466			spin_unlock(pa->pa_node_lock.lg_lock);
5467		} else {
5468			write_lock(pa->pa_node_lock.inode_lock);
5469			ei = EXT4_I(pa->pa_inode);
5470			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5471			write_unlock(pa->pa_node_lock.inode_lock);
5472		}
5473
5474		list_del(&pa->u.pa_tmp_list);
5475
5476		if (pa->pa_type == MB_GROUP_PA) {
5477			ext4_mb_release_group_pa(&e4b, pa);
5478			call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5479		} else {
5480			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5481			ext4_mb_pa_free(pa);
5482		}
5483	}
5484
5485	ext4_unlock_group(sb, group);
5486	ext4_mb_unload_buddy(&e4b);
5487	put_bh(bitmap_bh);
5488out_dbg:
5489	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5490		 free, group, grp->bb_free);
5491	return free;
5492}
5493
5494/*
5495 * releases all non-used preallocated blocks for given inode
5496 *
5497 * It's important to discard preallocations under i_data_sem
5498 * We don't want another block to be served from the prealloc
5499 * space when we are discarding the inode prealloc space.
5500 *
5501 * FIXME!! Make sure it is valid at all the call sites
5502 */
5503void ext4_discard_preallocations(struct inode *inode)
5504{
5505	struct ext4_inode_info *ei = EXT4_I(inode);
5506	struct super_block *sb = inode->i_sb;
5507	struct buffer_head *bitmap_bh = NULL;
5508	struct ext4_prealloc_space *pa, *tmp;
5509	ext4_group_t group = 0;
5510	LIST_HEAD(list);
5511	struct ext4_buddy e4b;
5512	struct rb_node *iter;
5513	int err;
5514
5515	if (!S_ISREG(inode->i_mode))
5516		return;
5517
5518	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5519		return;
5520
5521	mb_debug(sb, "discard preallocation for inode %lu\n",
5522		 inode->i_ino);
5523	trace_ext4_discard_preallocations(inode,
5524			atomic_read(&ei->i_prealloc_active));
5525
5526repeat:
5527	/* first, collect all pa's in the inode */
5528	write_lock(&ei->i_prealloc_lock);
5529	for (iter = rb_first(&ei->i_prealloc_node); iter;
5530	     iter = rb_next(iter)) {
5531		pa = rb_entry(iter, struct ext4_prealloc_space,
5532			      pa_node.inode_node);
5533		BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5534
5535		spin_lock(&pa->pa_lock);
5536		if (atomic_read(&pa->pa_count)) {
5537			/* this shouldn't happen often - nobody should
5538			 * use preallocation while we're discarding it */
5539			spin_unlock(&pa->pa_lock);
5540			write_unlock(&ei->i_prealloc_lock);
5541			ext4_msg(sb, KERN_ERR,
5542				 "uh-oh! used pa while discarding");
5543			WARN_ON(1);
5544			schedule_timeout_uninterruptible(HZ);
5545			goto repeat;
5546
5547		}
5548		if (pa->pa_deleted == 0) {
5549			ext4_mb_mark_pa_deleted(sb, pa);
5550			spin_unlock(&pa->pa_lock);
5551			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5552			list_add(&pa->u.pa_tmp_list, &list);
5553			continue;
5554		}
5555
5556		/* someone is deleting pa right now */
5557		spin_unlock(&pa->pa_lock);
5558		write_unlock(&ei->i_prealloc_lock);
5559
5560		/* we have to wait here because pa_deleted
5561		 * doesn't mean pa is already unlinked from
5562		 * the list. as we might be called from
5563		 * ->clear_inode() the inode will get freed
5564		 * and concurrent thread which is unlinking
5565		 * pa from inode's list may access already
5566		 * freed memory, bad-bad-bad */
5567
5568		/* XXX: if this happens too often, we can
5569		 * add a flag to force wait only in case
5570		 * of ->clear_inode(), but not in case of
5571		 * regular truncate */
5572		schedule_timeout_uninterruptible(HZ);
5573		goto repeat;
5574	}
5575	write_unlock(&ei->i_prealloc_lock);
5576
5577	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5578		BUG_ON(pa->pa_type != MB_INODE_PA);
5579		group = ext4_get_group_number(sb, pa->pa_pstart);
5580
5581		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5582					     GFP_NOFS|__GFP_NOFAIL);
5583		if (err) {
5584			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5585				       err, group);
5586			continue;
5587		}
5588
5589		bitmap_bh = ext4_read_block_bitmap(sb, group);
5590		if (IS_ERR(bitmap_bh)) {
5591			err = PTR_ERR(bitmap_bh);
5592			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5593				       err, group);
5594			ext4_mb_unload_buddy(&e4b);
5595			continue;
5596		}
5597
5598		ext4_lock_group(sb, group);
5599		list_del(&pa->pa_group_list);
5600		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5601		ext4_unlock_group(sb, group);
5602
5603		ext4_mb_unload_buddy(&e4b);
5604		put_bh(bitmap_bh);
5605
5606		list_del(&pa->u.pa_tmp_list);
5607		ext4_mb_pa_free(pa);
5608	}
5609}
5610
5611static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5612{
5613	struct ext4_prealloc_space *pa;
5614
5615	BUG_ON(ext4_pspace_cachep == NULL);
5616	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5617	if (!pa)
5618		return -ENOMEM;
5619	atomic_set(&pa->pa_count, 1);
5620	ac->ac_pa = pa;
5621	return 0;
5622}
5623
5624static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5625{
5626	struct ext4_prealloc_space *pa = ac->ac_pa;
5627
5628	BUG_ON(!pa);
5629	ac->ac_pa = NULL;
5630	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5631	/*
5632	 * current function is only called due to an error or due to
5633	 * len of found blocks < len of requested blocks hence the PA has not
5634	 * been added to grp->bb_prealloc_list. So we don't need to lock it
5635	 */
5636	pa->pa_deleted = 1;
5637	ext4_mb_pa_free(pa);
5638}
5639
5640#ifdef CONFIG_EXT4_DEBUG
5641static inline void ext4_mb_show_pa(struct super_block *sb)
5642{
5643	ext4_group_t i, ngroups;
5644
5645	if (ext4_forced_shutdown(sb))
5646		return;
5647
5648	ngroups = ext4_get_groups_count(sb);
5649	mb_debug(sb, "groups: ");
5650	for (i = 0; i < ngroups; i++) {
5651		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5652		struct ext4_prealloc_space *pa;
5653		ext4_grpblk_t start;
5654		struct list_head *cur;
5655
5656		if (!grp)
5657			continue;
5658		ext4_lock_group(sb, i);
5659		list_for_each(cur, &grp->bb_prealloc_list) {
5660			pa = list_entry(cur, struct ext4_prealloc_space,
5661					pa_group_list);
5662			spin_lock(&pa->pa_lock);
5663			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5664						     NULL, &start);
5665			spin_unlock(&pa->pa_lock);
5666			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5667				 pa->pa_len);
5668		}
5669		ext4_unlock_group(sb, i);
5670		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5671			 grp->bb_fragments);
5672	}
5673}
5674
5675static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5676{
5677	struct super_block *sb = ac->ac_sb;
5678
5679	if (ext4_forced_shutdown(sb))
5680		return;
5681
5682	mb_debug(sb, "Can't allocate:"
5683			" Allocation context details:");
5684	mb_debug(sb, "status %u flags 0x%x",
5685			ac->ac_status, ac->ac_flags);
5686	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5687			"goal %lu/%lu/%lu@%lu, "
5688			"best %lu/%lu/%lu@%lu cr %d",
5689			(unsigned long)ac->ac_o_ex.fe_group,
5690			(unsigned long)ac->ac_o_ex.fe_start,
5691			(unsigned long)ac->ac_o_ex.fe_len,
5692			(unsigned long)ac->ac_o_ex.fe_logical,
5693			(unsigned long)ac->ac_g_ex.fe_group,
5694			(unsigned long)ac->ac_g_ex.fe_start,
5695			(unsigned long)ac->ac_g_ex.fe_len,
5696			(unsigned long)ac->ac_g_ex.fe_logical,
5697			(unsigned long)ac->ac_b_ex.fe_group,
5698			(unsigned long)ac->ac_b_ex.fe_start,
5699			(unsigned long)ac->ac_b_ex.fe_len,
5700			(unsigned long)ac->ac_b_ex.fe_logical,
5701			(int)ac->ac_criteria);
5702	mb_debug(sb, "%u found", ac->ac_found);
5703	mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5704	if (ac->ac_pa)
5705		mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5706			 "group pa" : "inode pa");
5707	ext4_mb_show_pa(sb);
5708}
5709#else
5710static inline void ext4_mb_show_pa(struct super_block *sb)
5711{
5712}
5713static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5714{
5715	ext4_mb_show_pa(ac->ac_sb);
5716}
5717#endif
5718
5719/*
5720 * We use locality group preallocation for small size file. The size of the
5721 * file is determined by the current size or the resulting size after
5722 * allocation which ever is larger
5723 *
5724 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5725 */
5726static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5727{
5728	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5729	int bsbits = ac->ac_sb->s_blocksize_bits;
5730	loff_t size, isize;
5731	bool inode_pa_eligible, group_pa_eligible;
5732
5733	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5734		return;
5735
5736	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5737		return;
5738
5739	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5740	inode_pa_eligible = true;
5741	size = extent_logical_end(sbi, &ac->ac_o_ex);
5742	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5743		>> bsbits;
5744
5745	/* No point in using inode preallocation for closed files */
5746	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5747	    !inode_is_open_for_write(ac->ac_inode))
5748		inode_pa_eligible = false;
5749
5750	size = max(size, isize);
5751	/* Don't use group allocation for large files */
5752	if (size > sbi->s_mb_stream_request)
5753		group_pa_eligible = false;
5754
5755	if (!group_pa_eligible) {
5756		if (inode_pa_eligible)
5757			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5758		else
5759			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5760		return;
5761	}
5762
5763	BUG_ON(ac->ac_lg != NULL);
5764	/*
5765	 * locality group prealloc space are per cpu. The reason for having
5766	 * per cpu locality group is to reduce the contention between block
5767	 * request from multiple CPUs.
5768	 */
5769	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5770
5771	/* we're going to use group allocation */
5772	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5773
5774	/* serialize all allocations in the group */
5775	mutex_lock(&ac->ac_lg->lg_mutex);
5776}
5777
5778static noinline_for_stack void
5779ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5780				struct ext4_allocation_request *ar)
5781{
5782	struct super_block *sb = ar->inode->i_sb;
5783	struct ext4_sb_info *sbi = EXT4_SB(sb);
5784	struct ext4_super_block *es = sbi->s_es;
5785	ext4_group_t group;
5786	unsigned int len;
5787	ext4_fsblk_t goal;
5788	ext4_grpblk_t block;
5789
5790	/* we can't allocate > group size */
5791	len = ar->len;
5792
5793	/* just a dirty hack to filter too big requests  */
5794	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5795		len = EXT4_CLUSTERS_PER_GROUP(sb);
5796
5797	/* start searching from the goal */
5798	goal = ar->goal;
5799	if (goal < le32_to_cpu(es->s_first_data_block) ||
5800			goal >= ext4_blocks_count(es))
5801		goal = le32_to_cpu(es->s_first_data_block);
5802	ext4_get_group_no_and_offset(sb, goal, &group, &block);
5803
5804	/* set up allocation goals */
5805	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5806	ac->ac_status = AC_STATUS_CONTINUE;
5807	ac->ac_sb = sb;
5808	ac->ac_inode = ar->inode;
5809	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5810	ac->ac_o_ex.fe_group = group;
5811	ac->ac_o_ex.fe_start = block;
5812	ac->ac_o_ex.fe_len = len;
5813	ac->ac_g_ex = ac->ac_o_ex;
5814	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5815	ac->ac_flags = ar->flags;
5816
5817	/* we have to define context: we'll work with a file or
5818	 * locality group. this is a policy, actually */
5819	ext4_mb_group_or_file(ac);
5820
5821	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5822			"left: %u/%u, right %u/%u to %swritable\n",
5823			(unsigned) ar->len, (unsigned) ar->logical,
5824			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5825			(unsigned) ar->lleft, (unsigned) ar->pleft,
5826			(unsigned) ar->lright, (unsigned) ar->pright,
5827			inode_is_open_for_write(ar->inode) ? "" : "non-");
5828}
5829
5830static noinline_for_stack void
5831ext4_mb_discard_lg_preallocations(struct super_block *sb,
5832					struct ext4_locality_group *lg,
5833					int order, int total_entries)
5834{
5835	ext4_group_t group = 0;
5836	struct ext4_buddy e4b;
5837	LIST_HEAD(discard_list);
5838	struct ext4_prealloc_space *pa, *tmp;
5839
5840	mb_debug(sb, "discard locality group preallocation\n");
5841
5842	spin_lock(&lg->lg_prealloc_lock);
5843	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5844				pa_node.lg_list,
5845				lockdep_is_held(&lg->lg_prealloc_lock)) {
5846		spin_lock(&pa->pa_lock);
5847		if (atomic_read(&pa->pa_count)) {
5848			/*
5849			 * This is the pa that we just used
5850			 * for block allocation. So don't
5851			 * free that
5852			 */
5853			spin_unlock(&pa->pa_lock);
5854			continue;
5855		}
5856		if (pa->pa_deleted) {
5857			spin_unlock(&pa->pa_lock);
5858			continue;
5859		}
5860		/* only lg prealloc space */
5861		BUG_ON(pa->pa_type != MB_GROUP_PA);
5862
5863		/* seems this one can be freed ... */
5864		ext4_mb_mark_pa_deleted(sb, pa);
5865		spin_unlock(&pa->pa_lock);
5866
5867		list_del_rcu(&pa->pa_node.lg_list);
5868		list_add(&pa->u.pa_tmp_list, &discard_list);
5869
5870		total_entries--;
5871		if (total_entries <= 5) {
5872			/*
5873			 * we want to keep only 5 entries
5874			 * allowing it to grow to 8. This
5875			 * mak sure we don't call discard
5876			 * soon for this list.
5877			 */
5878			break;
5879		}
5880	}
5881	spin_unlock(&lg->lg_prealloc_lock);
5882
5883	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5884		int err;
5885
5886		group = ext4_get_group_number(sb, pa->pa_pstart);
5887		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5888					     GFP_NOFS|__GFP_NOFAIL);
5889		if (err) {
5890			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5891				       err, group);
5892			continue;
5893		}
5894		ext4_lock_group(sb, group);
5895		list_del(&pa->pa_group_list);
5896		ext4_mb_release_group_pa(&e4b, pa);
5897		ext4_unlock_group(sb, group);
5898
5899		ext4_mb_unload_buddy(&e4b);
5900		list_del(&pa->u.pa_tmp_list);
5901		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5902	}
5903}
5904
5905/*
5906 * We have incremented pa_count. So it cannot be freed at this
5907 * point. Also we hold lg_mutex. So no parallel allocation is
5908 * possible from this lg. That means pa_free cannot be updated.
5909 *
5910 * A parallel ext4_mb_discard_group_preallocations is possible.
5911 * which can cause the lg_prealloc_list to be updated.
5912 */
5913
5914static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5915{
5916	int order, added = 0, lg_prealloc_count = 1;
5917	struct super_block *sb = ac->ac_sb;
5918	struct ext4_locality_group *lg = ac->ac_lg;
5919	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5920
5921	order = fls(pa->pa_free) - 1;
5922	if (order > PREALLOC_TB_SIZE - 1)
5923		/* The max size of hash table is PREALLOC_TB_SIZE */
5924		order = PREALLOC_TB_SIZE - 1;
5925	/* Add the prealloc space to lg */
5926	spin_lock(&lg->lg_prealloc_lock);
5927	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5928				pa_node.lg_list,
5929				lockdep_is_held(&lg->lg_prealloc_lock)) {
5930		spin_lock(&tmp_pa->pa_lock);
5931		if (tmp_pa->pa_deleted) {
5932			spin_unlock(&tmp_pa->pa_lock);
5933			continue;
5934		}
5935		if (!added && pa->pa_free < tmp_pa->pa_free) {
5936			/* Add to the tail of the previous entry */
5937			list_add_tail_rcu(&pa->pa_node.lg_list,
5938						&tmp_pa->pa_node.lg_list);
5939			added = 1;
5940			/*
5941			 * we want to count the total
5942			 * number of entries in the list
5943			 */
5944		}
5945		spin_unlock(&tmp_pa->pa_lock);
5946		lg_prealloc_count++;
5947	}
5948	if (!added)
5949		list_add_tail_rcu(&pa->pa_node.lg_list,
5950					&lg->lg_prealloc_list[order]);
5951	spin_unlock(&lg->lg_prealloc_lock);
5952
5953	/* Now trim the list to be not more than 8 elements */
5954	if (lg_prealloc_count > 8)
5955		ext4_mb_discard_lg_preallocations(sb, lg,
5956						  order, lg_prealloc_count);
5957}
5958
5959/*
5960 * release all resource we used in allocation
5961 */
5962static void ext4_mb_release_context(struct ext4_allocation_context *ac)
5963{
5964	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5965	struct ext4_prealloc_space *pa = ac->ac_pa;
5966	if (pa) {
5967		if (pa->pa_type == MB_GROUP_PA) {
5968			/* see comment in ext4_mb_use_group_pa() */
5969			spin_lock(&pa->pa_lock);
5970			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5971			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5972			pa->pa_free -= ac->ac_b_ex.fe_len;
5973			pa->pa_len -= ac->ac_b_ex.fe_len;
5974			spin_unlock(&pa->pa_lock);
5975
5976			/*
5977			 * We want to add the pa to the right bucket.
5978			 * Remove it from the list and while adding
5979			 * make sure the list to which we are adding
5980			 * doesn't grow big.
5981			 */
5982			if (likely(pa->pa_free)) {
5983				spin_lock(pa->pa_node_lock.lg_lock);
5984				list_del_rcu(&pa->pa_node.lg_list);
5985				spin_unlock(pa->pa_node_lock.lg_lock);
5986				ext4_mb_add_n_trim(ac);
5987			}
5988		}
5989
5990		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5991	}
5992	if (ac->ac_bitmap_page)
5993		put_page(ac->ac_bitmap_page);
5994	if (ac->ac_buddy_page)
5995		put_page(ac->ac_buddy_page);
5996	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5997		mutex_unlock(&ac->ac_lg->lg_mutex);
5998	ext4_mb_collect_stats(ac);
5999}
6000
6001static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
6002{
6003	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
6004	int ret;
6005	int freed = 0, busy = 0;
6006	int retry = 0;
6007
6008	trace_ext4_mb_discard_preallocations(sb, needed);
6009
6010	if (needed == 0)
6011		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
6012 repeat:
6013	for (i = 0; i < ngroups && needed > 0; i++) {
6014		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
6015		freed += ret;
6016		needed -= ret;
6017		cond_resched();
6018	}
6019
6020	if (needed > 0 && busy && ++retry < 3) {
6021		busy = 0;
6022		goto repeat;
6023	}
6024
6025	return freed;
6026}
6027
6028static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6029			struct ext4_allocation_context *ac, u64 *seq)
6030{
6031	int freed;
6032	u64 seq_retry = 0;
6033	bool ret = false;
6034
6035	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6036	if (freed) {
6037		ret = true;
6038		goto out_dbg;
6039	}
6040	seq_retry = ext4_get_discard_pa_seq_sum();
6041	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6042		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6043		*seq = seq_retry;
6044		ret = true;
6045	}
6046
6047out_dbg:
6048	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
6049	return ret;
6050}
6051
6052/*
6053 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6054 * linearly starting at the goal block and also excludes the blocks which
6055 * are going to be in use after fast commit replay.
6056 */
6057static ext4_fsblk_t
6058ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6059{
6060	struct buffer_head *bitmap_bh;
6061	struct super_block *sb = ar->inode->i_sb;
6062	struct ext4_sb_info *sbi = EXT4_SB(sb);
6063	ext4_group_t group, nr;
6064	ext4_grpblk_t blkoff;
6065	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6066	ext4_grpblk_t i = 0;
6067	ext4_fsblk_t goal, block;
6068	struct ext4_super_block *es = sbi->s_es;
6069
6070	goal = ar->goal;
6071	if (goal < le32_to_cpu(es->s_first_data_block) ||
6072			goal >= ext4_blocks_count(es))
6073		goal = le32_to_cpu(es->s_first_data_block);
6074
6075	ar->len = 0;
6076	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6077	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6078		bitmap_bh = ext4_read_block_bitmap(sb, group);
6079		if (IS_ERR(bitmap_bh)) {
6080			*errp = PTR_ERR(bitmap_bh);
6081			pr_warn("Failed to read block bitmap\n");
6082			return 0;
6083		}
6084
6085		while (1) {
6086			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6087						blkoff);
6088			if (i >= max)
6089				break;
6090			if (ext4_fc_replay_check_excluded(sb,
6091				ext4_group_first_block_no(sb, group) +
6092				EXT4_C2B(sbi, i))) {
6093				blkoff = i + 1;
6094			} else
6095				break;
6096		}
6097		brelse(bitmap_bh);
6098		if (i < max)
6099			break;
6100
6101		if (++group >= ext4_get_groups_count(sb))
6102			group = 0;
6103
6104		blkoff = 0;
6105	}
6106
6107	if (i >= max) {
6108		*errp = -ENOSPC;
6109		return 0;
6110	}
6111
6112	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6113	ext4_mb_mark_bb(sb, block, 1, true);
6114	ar->len = 1;
6115
6116	*errp = 0;
6117	return block;
6118}
6119
6120/*
6121 * Main entry point into mballoc to allocate blocks
6122 * it tries to use preallocation first, then falls back
6123 * to usual allocation
6124 */
6125ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6126				struct ext4_allocation_request *ar, int *errp)
6127{
6128	struct ext4_allocation_context *ac = NULL;
6129	struct ext4_sb_info *sbi;
6130	struct super_block *sb;
6131	ext4_fsblk_t block = 0;
6132	unsigned int inquota = 0;
6133	unsigned int reserv_clstrs = 0;
6134	int retries = 0;
6135	u64 seq;
6136
6137	might_sleep();
6138	sb = ar->inode->i_sb;
6139	sbi = EXT4_SB(sb);
6140
6141	trace_ext4_request_blocks(ar);
6142	if (sbi->s_mount_state & EXT4_FC_REPLAY)
6143		return ext4_mb_new_blocks_simple(ar, errp);
6144
6145	/* Allow to use superuser reservation for quota file */
6146	if (ext4_is_quota_file(ar->inode))
6147		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6148
6149	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6150		/* Without delayed allocation we need to verify
6151		 * there is enough free blocks to do block allocation
6152		 * and verify allocation doesn't exceed the quota limits.
6153		 */
6154		while (ar->len &&
6155			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6156
6157			/* let others to free the space */
6158			cond_resched();
6159			ar->len = ar->len >> 1;
6160		}
6161		if (!ar->len) {
6162			ext4_mb_show_pa(sb);
6163			*errp = -ENOSPC;
6164			return 0;
6165		}
6166		reserv_clstrs = ar->len;
6167		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6168			dquot_alloc_block_nofail(ar->inode,
6169						 EXT4_C2B(sbi, ar->len));
6170		} else {
6171			while (ar->len &&
6172				dquot_alloc_block(ar->inode,
6173						  EXT4_C2B(sbi, ar->len))) {
6174
6175				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6176				ar->len--;
6177			}
6178		}
6179		inquota = ar->len;
6180		if (ar->len == 0) {
6181			*errp = -EDQUOT;
6182			goto out;
6183		}
6184	}
6185
6186	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6187	if (!ac) {
6188		ar->len = 0;
6189		*errp = -ENOMEM;
6190		goto out;
6191	}
6192
6193	ext4_mb_initialize_context(ac, ar);
6194
6195	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6196	seq = this_cpu_read(discard_pa_seq);
6197	if (!ext4_mb_use_preallocated(ac)) {
6198		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6199		ext4_mb_normalize_request(ac, ar);
6200
6201		*errp = ext4_mb_pa_alloc(ac);
6202		if (*errp)
6203			goto errout;
6204repeat:
6205		/* allocate space in core */
6206		*errp = ext4_mb_regular_allocator(ac);
6207		/*
6208		 * pa allocated above is added to grp->bb_prealloc_list only
6209		 * when we were able to allocate some block i.e. when
6210		 * ac->ac_status == AC_STATUS_FOUND.
6211		 * And error from above mean ac->ac_status != AC_STATUS_FOUND
6212		 * So we have to free this pa here itself.
6213		 */
6214		if (*errp) {
6215			ext4_mb_pa_put_free(ac);
6216			ext4_discard_allocated_blocks(ac);
6217			goto errout;
6218		}
6219		if (ac->ac_status == AC_STATUS_FOUND &&
6220			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6221			ext4_mb_pa_put_free(ac);
6222	}
6223	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6224		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6225		if (*errp) {
6226			ext4_discard_allocated_blocks(ac);
6227			goto errout;
6228		} else {
6229			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6230			ar->len = ac->ac_b_ex.fe_len;
6231		}
6232	} else {
6233		if (++retries < 3 &&
6234		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6235			goto repeat;
6236		/*
6237		 * If block allocation fails then the pa allocated above
6238		 * needs to be freed here itself.
6239		 */
6240		ext4_mb_pa_put_free(ac);
6241		*errp = -ENOSPC;
6242	}
6243
6244	if (*errp) {
6245errout:
6246		ac->ac_b_ex.fe_len = 0;
6247		ar->len = 0;
6248		ext4_mb_show_ac(ac);
6249	}
6250	ext4_mb_release_context(ac);
6251	kmem_cache_free(ext4_ac_cachep, ac);
6252out:
6253	if (inquota && ar->len < inquota)
6254		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6255	if (!ar->len) {
6256		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6257			/* release all the reserved blocks if non delalloc */
6258			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6259						reserv_clstrs);
6260	}
6261
6262	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6263
6264	return block;
6265}
6266
6267/*
6268 * We can merge two free data extents only if the physical blocks
6269 * are contiguous, AND the extents were freed by the same transaction,
6270 * AND the blocks are associated with the same group.
6271 */
6272static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6273					struct ext4_free_data *entry,
6274					struct ext4_free_data *new_entry,
6275					struct rb_root *entry_rb_root)
6276{
6277	if ((entry->efd_tid != new_entry->efd_tid) ||
6278	    (entry->efd_group != new_entry->efd_group))
6279		return;
6280	if (entry->efd_start_cluster + entry->efd_count ==
6281	    new_entry->efd_start_cluster) {
6282		new_entry->efd_start_cluster = entry->efd_start_cluster;
6283		new_entry->efd_count += entry->efd_count;
6284	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6285		   entry->efd_start_cluster) {
6286		new_entry->efd_count += entry->efd_count;
6287	} else
6288		return;
6289	spin_lock(&sbi->s_md_lock);
6290	list_del(&entry->efd_list);
6291	spin_unlock(&sbi->s_md_lock);
6292	rb_erase(&entry->efd_node, entry_rb_root);
6293	kmem_cache_free(ext4_free_data_cachep, entry);
6294}
6295
6296static noinline_for_stack void
6297ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6298		      struct ext4_free_data *new_entry)
6299{
6300	ext4_group_t group = e4b->bd_group;
6301	ext4_grpblk_t cluster;
6302	ext4_grpblk_t clusters = new_entry->efd_count;
6303	struct ext4_free_data *entry;
6304	struct ext4_group_info *db = e4b->bd_info;
6305	struct super_block *sb = e4b->bd_sb;
6306	struct ext4_sb_info *sbi = EXT4_SB(sb);
6307	struct rb_node **n = &db->bb_free_root.rb_node, *node;
6308	struct rb_node *parent = NULL, *new_node;
6309
6310	BUG_ON(!ext4_handle_valid(handle));
6311	BUG_ON(e4b->bd_bitmap_page == NULL);
6312	BUG_ON(e4b->bd_buddy_page == NULL);
6313
6314	new_node = &new_entry->efd_node;
6315	cluster = new_entry->efd_start_cluster;
6316
6317	if (!*n) {
6318		/* first free block exent. We need to
6319		   protect buddy cache from being freed,
6320		 * otherwise we'll refresh it from
6321		 * on-disk bitmap and lose not-yet-available
6322		 * blocks */
6323		get_page(e4b->bd_buddy_page);
6324		get_page(e4b->bd_bitmap_page);
6325	}
6326	while (*n) {
6327		parent = *n;
6328		entry = rb_entry(parent, struct ext4_free_data, efd_node);
6329		if (cluster < entry->efd_start_cluster)
6330			n = &(*n)->rb_left;
6331		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6332			n = &(*n)->rb_right;
6333		else {
6334			ext4_grp_locked_error(sb, group, 0,
6335				ext4_group_first_block_no(sb, group) +
6336				EXT4_C2B(sbi, cluster),
6337				"Block already on to-be-freed list");
6338			kmem_cache_free(ext4_free_data_cachep, new_entry);
6339			return;
6340		}
6341	}
6342
6343	rb_link_node(new_node, parent, n);
6344	rb_insert_color(new_node, &db->bb_free_root);
6345
6346	/* Now try to see the extent can be merged to left and right */
6347	node = rb_prev(new_node);
6348	if (node) {
6349		entry = rb_entry(node, struct ext4_free_data, efd_node);
6350		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6351					    &(db->bb_free_root));
6352	}
6353
6354	node = rb_next(new_node);
6355	if (node) {
6356		entry = rb_entry(node, struct ext4_free_data, efd_node);
6357		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6358					    &(db->bb_free_root));
6359	}
6360
6361	spin_lock(&sbi->s_md_lock);
6362	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6363	sbi->s_mb_free_pending += clusters;
6364	spin_unlock(&sbi->s_md_lock);
6365}
6366
6367static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6368					unsigned long count)
6369{
6370	struct super_block *sb = inode->i_sb;
6371	ext4_group_t group;
6372	ext4_grpblk_t blkoff;
6373
6374	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6375	ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
6376			     EXT4_MB_BITMAP_MARKED_CHECK |
6377			     EXT4_MB_SYNC_UPDATE,
6378			     NULL);
6379}
6380
6381/**
6382 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6383 *			Used by ext4_free_blocks()
6384 * @handle:		handle for this transaction
6385 * @inode:		inode
6386 * @block:		starting physical block to be freed
6387 * @count:		number of blocks to be freed
6388 * @flags:		flags used by ext4_free_blocks
6389 */
6390static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6391			       ext4_fsblk_t block, unsigned long count,
6392			       int flags)
6393{
6394	struct super_block *sb = inode->i_sb;
6395	struct ext4_group_info *grp;
6396	unsigned int overflow;
6397	ext4_grpblk_t bit;
6398	ext4_group_t block_group;
6399	struct ext4_sb_info *sbi;
6400	struct ext4_buddy e4b;
6401	unsigned int count_clusters;
6402	int err = 0;
6403	int mark_flags = 0;
6404	ext4_grpblk_t changed;
6405
6406	sbi = EXT4_SB(sb);
6407
6408	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6409	    !ext4_inode_block_valid(inode, block, count)) {
6410		ext4_error(sb, "Freeing blocks in system zone - "
6411			   "Block = %llu, count = %lu", block, count);
6412		/* err = 0. ext4_std_error should be a no op */
6413		goto error_out;
6414	}
6415	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6416
6417do_more:
6418	overflow = 0;
6419	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6420
6421	grp = ext4_get_group_info(sb, block_group);
6422	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6423		return;
6424
6425	/*
6426	 * Check to see if we are freeing blocks across a group
6427	 * boundary.
6428	 */
6429	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6430		overflow = EXT4_C2B(sbi, bit) + count -
6431			EXT4_BLOCKS_PER_GROUP(sb);
6432		count -= overflow;
6433		/* The range changed so it's no longer validated */
6434		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6435	}
6436	count_clusters = EXT4_NUM_B2C(sbi, count);
6437	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6438
6439	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6440	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6441				     GFP_NOFS|__GFP_NOFAIL);
6442	if (err)
6443		goto error_out;
6444
6445	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6446	    !ext4_inode_block_valid(inode, block, count)) {
6447		ext4_error(sb, "Freeing blocks in system zone - "
6448			   "Block = %llu, count = %lu", block, count);
6449		/* err = 0. ext4_std_error should be a no op */
6450		goto error_clean;
6451	}
6452
6453#ifdef AGGRESSIVE_CHECK
6454	mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
6455#endif
6456	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6457				   count_clusters, mark_flags, &changed);
6458
6459
6460	if (err && changed == 0)
6461		goto error_clean;
6462
6463#ifdef AGGRESSIVE_CHECK
6464	BUG_ON(changed != count_clusters);
6465#endif
6466
6467	/*
6468	 * We need to make sure we don't reuse the freed block until after the
6469	 * transaction is committed. We make an exception if the inode is to be
6470	 * written in writeback mode since writeback mode has weak data
6471	 * consistency guarantees.
6472	 */
6473	if (ext4_handle_valid(handle) &&
6474	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6475	     !ext4_should_writeback_data(inode))) {
6476		struct ext4_free_data *new_entry;
6477		/*
6478		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6479		 * to fail.
6480		 */
6481		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6482				GFP_NOFS|__GFP_NOFAIL);
6483		new_entry->efd_start_cluster = bit;
6484		new_entry->efd_group = block_group;
6485		new_entry->efd_count = count_clusters;
6486		new_entry->efd_tid = handle->h_transaction->t_tid;
6487
6488		ext4_lock_group(sb, block_group);
6489		ext4_mb_free_metadata(handle, &e4b, new_entry);
6490	} else {
6491		if (test_opt(sb, DISCARD)) {
6492			err = ext4_issue_discard(sb, block_group, bit,
6493						 count_clusters);
6494			/*
6495			 * Ignore EOPNOTSUPP error. This is consistent with
6496			 * what happens when using journal.
6497			 */
6498			if (err == -EOPNOTSUPP)
6499				err = 0;
6500			if (err)
6501				ext4_msg(sb, KERN_WARNING, "discard request in"
6502					 " group:%u block:%d count:%lu failed"
6503					 " with %d", block_group, bit, count,
6504					 err);
6505		} else
6506			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
 
6507
6508		ext4_lock_group(sb, block_group);
6509		mb_free_blocks(inode, &e4b, bit, count_clusters);
6510	}
6511
6512	ext4_unlock_group(sb, block_group);
6513
6514	/*
6515	 * on a bigalloc file system, defer the s_freeclusters_counter
6516	 * update to the caller (ext4_remove_space and friends) so they
6517	 * can determine if a cluster freed here should be rereserved
6518	 */
6519	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6520		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6521			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6522		percpu_counter_add(&sbi->s_freeclusters_counter,
6523				   count_clusters);
6524	}
6525
6526	if (overflow && !err) {
6527		block += count;
6528		count = overflow;
6529		ext4_mb_unload_buddy(&e4b);
6530		/* The range changed so it's no longer validated */
6531		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6532		goto do_more;
6533	}
6534
6535error_clean:
6536	ext4_mb_unload_buddy(&e4b);
6537error_out:
6538	ext4_std_error(sb, err);
6539}
6540
6541/**
6542 * ext4_free_blocks() -- Free given blocks and update quota
6543 * @handle:		handle for this transaction
6544 * @inode:		inode
6545 * @bh:			optional buffer of the block to be freed
6546 * @block:		starting physical block to be freed
6547 * @count:		number of blocks to be freed
6548 * @flags:		flags used by ext4_free_blocks
6549 */
6550void ext4_free_blocks(handle_t *handle, struct inode *inode,
6551		      struct buffer_head *bh, ext4_fsblk_t block,
6552		      unsigned long count, int flags)
6553{
6554	struct super_block *sb = inode->i_sb;
6555	unsigned int overflow;
6556	struct ext4_sb_info *sbi;
6557
6558	sbi = EXT4_SB(sb);
6559
6560	if (bh) {
6561		if (block)
6562			BUG_ON(block != bh->b_blocknr);
6563		else
6564			block = bh->b_blocknr;
6565	}
6566
6567	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6568		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6569		return;
6570	}
6571
6572	might_sleep();
6573
6574	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6575	    !ext4_inode_block_valid(inode, block, count)) {
6576		ext4_error(sb, "Freeing blocks not in datazone - "
6577			   "block = %llu, count = %lu", block, count);
6578		return;
6579	}
6580	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6581
6582	ext4_debug("freeing block %llu\n", block);
6583	trace_ext4_free_blocks(inode, block, count, flags);
6584
6585	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6586		BUG_ON(count > 1);
6587
6588		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6589			    inode, bh, block);
6590	}
6591
6592	/*
6593	 * If the extent to be freed does not begin on a cluster
6594	 * boundary, we need to deal with partial clusters at the
6595	 * beginning and end of the extent.  Normally we will free
6596	 * blocks at the beginning or the end unless we are explicitly
6597	 * requested to avoid doing so.
6598	 */
6599	overflow = EXT4_PBLK_COFF(sbi, block);
6600	if (overflow) {
6601		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6602			overflow = sbi->s_cluster_ratio - overflow;
6603			block += overflow;
6604			if (count > overflow)
6605				count -= overflow;
6606			else
6607				return;
6608		} else {
6609			block -= overflow;
6610			count += overflow;
6611		}
6612		/* The range changed so it's no longer validated */
6613		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6614	}
6615	overflow = EXT4_LBLK_COFF(sbi, count);
6616	if (overflow) {
6617		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6618			if (count > overflow)
6619				count -= overflow;
6620			else
6621				return;
6622		} else
6623			count += sbi->s_cluster_ratio - overflow;
6624		/* The range changed so it's no longer validated */
6625		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6626	}
6627
6628	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6629		int i;
6630		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6631
6632		for (i = 0; i < count; i++) {
6633			cond_resched();
6634			if (is_metadata)
6635				bh = sb_find_get_block(inode->i_sb, block + i);
6636			ext4_forget(handle, is_metadata, inode, bh, block + i);
6637		}
6638	}
6639
6640	ext4_mb_clear_bb(handle, inode, block, count, flags);
6641}
6642
6643/**
6644 * ext4_group_add_blocks() -- Add given blocks to an existing group
6645 * @handle:			handle to this transaction
6646 * @sb:				super block
6647 * @block:			start physical block to add to the block group
6648 * @count:			number of blocks to free
6649 *
6650 * This marks the blocks as free in the bitmap and buddy.
6651 */
6652int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6653			 ext4_fsblk_t block, unsigned long count)
6654{
6655	ext4_group_t block_group;
6656	ext4_grpblk_t bit;
6657	struct ext4_sb_info *sbi = EXT4_SB(sb);
6658	struct ext4_buddy e4b;
6659	int err = 0;
6660	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6661	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6662	unsigned long cluster_count = last_cluster - first_cluster + 1;
6663	ext4_grpblk_t changed;
6664
6665	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6666
6667	if (cluster_count == 0)
6668		return 0;
6669
6670	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6671	/*
6672	 * Check to see if we are freeing blocks across a group
6673	 * boundary.
6674	 */
6675	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6676		ext4_warning(sb, "too many blocks added to group %u",
6677			     block_group);
6678		err = -EINVAL;
6679		goto error_out;
6680	}
6681
6682	err = ext4_mb_load_buddy(sb, block_group, &e4b);
6683	if (err)
6684		goto error_out;
6685
6686	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6687		ext4_error(sb, "Adding blocks in system zones - "
6688			   "Block = %llu, count = %lu",
6689			   block, count);
6690		err = -EINVAL;
6691		goto error_clean;
6692	}
6693
6694	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6695				   cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6696				   &changed);
6697	if (err && changed == 0)
6698		goto error_clean;
6699
6700	if (changed != cluster_count)
6701		ext4_error(sb, "bit already cleared in group %u", block_group);
6702
6703	ext4_lock_group(sb, block_group);
6704	mb_free_blocks(NULL, &e4b, bit, cluster_count);
6705	ext4_unlock_group(sb, block_group);
6706	percpu_counter_add(&sbi->s_freeclusters_counter,
6707			   changed);
6708
6709error_clean:
6710	ext4_mb_unload_buddy(&e4b);
6711error_out:
6712	ext4_std_error(sb, err);
6713	return err;
6714}
6715
6716/**
6717 * ext4_trim_extent -- function to TRIM one single free extent in the group
6718 * @sb:		super block for the file system
6719 * @start:	starting block of the free extent in the alloc. group
6720 * @count:	number of blocks to TRIM
6721 * @e4b:	ext4 buddy for the group
6722 *
6723 * Trim "count" blocks starting at "start" in the "group". To assure that no
6724 * one will allocate those blocks, mark it as used in buddy bitmap. This must
6725 * be called with under the group lock.
6726 */
6727static int ext4_trim_extent(struct super_block *sb,
6728		int start, int count, struct ext4_buddy *e4b)
6729__releases(bitlock)
6730__acquires(bitlock)
6731{
6732	struct ext4_free_extent ex;
6733	ext4_group_t group = e4b->bd_group;
6734	int ret = 0;
6735
6736	trace_ext4_trim_extent(sb, group, start, count);
6737
6738	assert_spin_locked(ext4_group_lock_ptr(sb, group));
6739
6740	ex.fe_start = start;
6741	ex.fe_group = group;
6742	ex.fe_len = count;
6743
6744	/*
6745	 * Mark blocks used, so no one can reuse them while
6746	 * being trimmed.
6747	 */
6748	mb_mark_used(e4b, &ex);
6749	ext4_unlock_group(sb, group);
6750	ret = ext4_issue_discard(sb, group, start, count);
6751	ext4_lock_group(sb, group);
6752	mb_free_blocks(NULL, e4b, start, ex.fe_len);
6753	return ret;
6754}
6755
6756static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6757					   ext4_group_t grp)
6758{
6759	unsigned long nr_clusters_in_group;
6760
6761	if (grp < (ext4_get_groups_count(sb) - 1))
6762		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6763	else
6764		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6765					ext4_group_first_block_no(sb, grp))
6766				       >> EXT4_CLUSTER_BITS(sb);
6767
6768	return nr_clusters_in_group - 1;
6769}
6770
6771static bool ext4_trim_interrupted(void)
6772{
6773	return fatal_signal_pending(current) || freezing(current);
6774}
6775
6776static int ext4_try_to_trim_range(struct super_block *sb,
6777		struct ext4_buddy *e4b, ext4_grpblk_t start,
6778		ext4_grpblk_t max, ext4_grpblk_t minblocks)
6779__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6780__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6781{
6782	ext4_grpblk_t next, count, free_count, last, origin_start;
6783	bool set_trimmed = false;
6784	void *bitmap;
6785
6786	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
6787		return 0;
6788
6789	last = ext4_last_grp_cluster(sb, e4b->bd_group);
6790	bitmap = e4b->bd_bitmap;
6791	if (start == 0 && max >= last)
6792		set_trimmed = true;
6793	origin_start = start;
6794	start = max(e4b->bd_info->bb_first_free, start);
6795	count = 0;
6796	free_count = 0;
6797
6798	while (start <= max) {
6799		start = mb_find_next_zero_bit(bitmap, max + 1, start);
6800		if (start > max)
6801			break;
6802
6803		next = mb_find_next_bit(bitmap, last + 1, start);
6804		if (origin_start == 0 && next >= last)
6805			set_trimmed = true;
6806
6807		if ((next - start) >= minblocks) {
6808			int ret = ext4_trim_extent(sb, start, next - start, e4b);
6809
6810			if (ret && ret != -EOPNOTSUPP)
6811				return count;
6812			count += next - start;
6813		}
6814		free_count += next - start;
6815		start = next + 1;
6816
6817		if (ext4_trim_interrupted())
6818			return count;
6819
6820		if (need_resched()) {
6821			ext4_unlock_group(sb, e4b->bd_group);
6822			cond_resched();
6823			ext4_lock_group(sb, e4b->bd_group);
6824		}
6825
6826		if ((e4b->bd_info->bb_free - free_count) < minblocks)
6827			break;
6828	}
6829
6830	if (set_trimmed)
6831		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6832
6833	return count;
6834}
6835
6836/**
6837 * ext4_trim_all_free -- function to trim all free space in alloc. group
6838 * @sb:			super block for file system
6839 * @group:		group to be trimmed
6840 * @start:		first group block to examine
6841 * @max:		last group block to examine
6842 * @minblocks:		minimum extent block count
6843 *
6844 * ext4_trim_all_free walks through group's block bitmap searching for free
6845 * extents. When the free extent is found, mark it as used in group buddy
6846 * bitmap. Then issue a TRIM command on this extent and free the extent in
6847 * the group buddy bitmap.
6848 */
6849static ext4_grpblk_t
6850ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6851		   ext4_grpblk_t start, ext4_grpblk_t max,
6852		   ext4_grpblk_t minblocks)
6853{
6854	struct ext4_buddy e4b;
6855	int ret;
6856
6857	trace_ext4_trim_all_free(sb, group, start, max);
6858
6859	ret = ext4_mb_load_buddy(sb, group, &e4b);
6860	if (ret) {
6861		ext4_warning(sb, "Error %d loading buddy information for %u",
6862			     ret, group);
6863		return ret;
6864	}
6865
6866	ext4_lock_group(sb, group);
6867
6868	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6869	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6870		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6871	else
6872		ret = 0;
6873
6874	ext4_unlock_group(sb, group);
6875	ext4_mb_unload_buddy(&e4b);
6876
6877	ext4_debug("trimmed %d blocks in the group %d\n",
6878		ret, group);
6879
6880	return ret;
6881}
6882
6883/**
6884 * ext4_trim_fs() -- trim ioctl handle function
6885 * @sb:			superblock for filesystem
6886 * @range:		fstrim_range structure
6887 *
6888 * start:	First Byte to trim
6889 * len:		number of Bytes to trim from start
6890 * minlen:	minimum extent length in Bytes
6891 * ext4_trim_fs goes through all allocation groups containing Bytes from
6892 * start to start+len. For each such a group ext4_trim_all_free function
6893 * is invoked to trim all free space.
6894 */
6895int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6896{
6897	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6898	struct ext4_group_info *grp;
6899	ext4_group_t group, first_group, last_group;
6900	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6901	uint64_t start, end, minlen, trimmed = 0;
6902	ext4_fsblk_t first_data_blk =
6903			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6904	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6905	int ret = 0;
6906
6907	start = range->start >> sb->s_blocksize_bits;
6908	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6909	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6910			      range->minlen >> sb->s_blocksize_bits);
6911
6912	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6913	    start >= max_blks ||
6914	    range->len < sb->s_blocksize)
6915		return -EINVAL;
6916	/* No point to try to trim less than discard granularity */
6917	if (range->minlen < discard_granularity) {
6918		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6919				discard_granularity >> sb->s_blocksize_bits);
6920		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6921			goto out;
6922	}
6923	if (end >= max_blks - 1)
6924		end = max_blks - 1;
6925	if (end <= first_data_blk)
6926		goto out;
6927	if (start < first_data_blk)
6928		start = first_data_blk;
6929
6930	/* Determine first and last group to examine based on start and end */
6931	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6932				     &first_group, &first_cluster);
6933	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6934				     &last_group, &last_cluster);
6935
6936	/* end now represents the last cluster to discard in this group */
6937	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6938
6939	for (group = first_group; group <= last_group; group++) {
6940		if (ext4_trim_interrupted())
6941			break;
6942		grp = ext4_get_group_info(sb, group);
6943		if (!grp)
6944			continue;
6945		/* We only do this if the grp has never been initialized */
6946		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6947			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6948			if (ret)
6949				break;
6950		}
6951
6952		/*
6953		 * For all the groups except the last one, last cluster will
6954		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6955		 * change it for the last group, note that last_cluster is
6956		 * already computed earlier by ext4_get_group_no_and_offset()
6957		 */
6958		if (group == last_group)
6959			end = last_cluster;
6960		if (grp->bb_free >= minlen) {
6961			cnt = ext4_trim_all_free(sb, group, first_cluster,
6962						 end, minlen);
6963			if (cnt < 0) {
6964				ret = cnt;
6965				break;
6966			}
6967			trimmed += cnt;
6968		}
6969
6970		/*
6971		 * For every group except the first one, we are sure
6972		 * that the first cluster to discard will be cluster #0.
6973		 */
6974		first_cluster = 0;
6975	}
6976
6977	if (!ret)
6978		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6979
6980out:
6981	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6982	return ret;
6983}
6984
6985/* Iterate all the free extents in the group. */
6986int
6987ext4_mballoc_query_range(
6988	struct super_block		*sb,
6989	ext4_group_t			group,
6990	ext4_grpblk_t			start,
6991	ext4_grpblk_t			end,
 
6992	ext4_mballoc_query_range_fn	formatter,
6993	void				*priv)
6994{
6995	void				*bitmap;
6996	ext4_grpblk_t			next;
6997	struct ext4_buddy		e4b;
6998	int				error;
6999
7000	error = ext4_mb_load_buddy(sb, group, &e4b);
7001	if (error)
7002		return error;
7003	bitmap = e4b.bd_bitmap;
7004
7005	ext4_lock_group(sb, group);
7006
7007	start = max(e4b.bd_info->bb_first_free, start);
7008	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
7009		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
7010
 
 
 
 
 
 
 
 
 
7011	while (start <= end) {
7012		start = mb_find_next_zero_bit(bitmap, end + 1, start);
7013		if (start > end)
7014			break;
7015		next = mb_find_next_bit(bitmap, end + 1, start);
7016
7017		ext4_unlock_group(sb, group);
7018		error = formatter(sb, group, start, next - start, priv);
7019		if (error)
7020			goto out_unload;
7021		ext4_lock_group(sb, group);
7022
7023		start = next + 1;
7024	}
7025
7026	ext4_unlock_group(sb, group);
7027out_unload:
7028	ext4_mb_unload_buddy(&e4b);
7029
7030	return error;
7031}
7032
7033#ifdef CONFIG_EXT4_KUNIT_TESTS
7034#include "mballoc-test.c"
7035#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
   4 * Written by Alex Tomas <alex@clusterfs.com>
   5 */
   6
   7
   8/*
   9 * mballoc.c contains the multiblocks allocation routines
  10 */
  11
  12#include "ext4_jbd2.h"
  13#include "mballoc.h"
  14#include <linux/log2.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/nospec.h>
  18#include <linux/backing-dev.h>
  19#include <linux/freezer.h>
  20#include <trace/events/ext4.h>
  21#include <kunit/static_stub.h>
  22
  23/*
  24 * MUSTDO:
  25 *   - test ext4_ext_search_left() and ext4_ext_search_right()
  26 *   - search for metadata in few groups
  27 *
  28 * TODO v4:
  29 *   - normalization should take into account whether file is still open
  30 *   - discard preallocations if no free space left (policy?)
  31 *   - don't normalize tails
  32 *   - quota
  33 *   - reservation for superuser
  34 *
  35 * TODO v3:
  36 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
  37 *   - track min/max extents in each group for better group selection
  38 *   - mb_mark_used() may allocate chunk right after splitting buddy
  39 *   - tree of groups sorted by number of free blocks
  40 *   - error handling
  41 */
  42
  43/*
  44 * The allocation request involve request for multiple number of blocks
  45 * near to the goal(block) value specified.
  46 *
  47 * During initialization phase of the allocator we decide to use the
  48 * group preallocation or inode preallocation depending on the size of
  49 * the file. The size of the file could be the resulting file size we
  50 * would have after allocation, or the current file size, which ever
  51 * is larger. If the size is less than sbi->s_mb_stream_request we
  52 * select to use the group preallocation. The default value of
  53 * s_mb_stream_request is 16 blocks. This can also be tuned via
  54 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
  55 * terms of number of blocks.
  56 *
  57 * The main motivation for having small file use group preallocation is to
  58 * ensure that we have small files closer together on the disk.
  59 *
  60 * First stage the allocator looks at the inode prealloc list,
  61 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
  62 * spaces for this particular inode. The inode prealloc space is
  63 * represented as:
  64 *
  65 * pa_lstart -> the logical start block for this prealloc space
  66 * pa_pstart -> the physical start block for this prealloc space
  67 * pa_len    -> length for this prealloc space (in clusters)
  68 * pa_free   ->  free space available in this prealloc space (in clusters)
  69 *
  70 * The inode preallocation space is used looking at the _logical_ start
  71 * block. If only the logical file block falls within the range of prealloc
  72 * space we will consume the particular prealloc space. This makes sure that
  73 * we have contiguous physical blocks representing the file blocks
  74 *
  75 * The important thing to be noted in case of inode prealloc space is that
  76 * we don't modify the values associated to inode prealloc space except
  77 * pa_free.
  78 *
  79 * If we are not able to find blocks in the inode prealloc space and if we
  80 * have the group allocation flag set then we look at the locality group
  81 * prealloc space. These are per CPU prealloc list represented as
  82 *
  83 * ext4_sb_info.s_locality_groups[smp_processor_id()]
  84 *
  85 * The reason for having a per cpu locality group is to reduce the contention
  86 * between CPUs. It is possible to get scheduled at this point.
  87 *
  88 * The locality group prealloc space is used looking at whether we have
  89 * enough free space (pa_free) within the prealloc space.
  90 *
  91 * If we can't allocate blocks via inode prealloc or/and locality group
  92 * prealloc then we look at the buddy cache. The buddy cache is represented
  93 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
  94 * mapped to the buddy and bitmap information regarding different
  95 * groups. The buddy information is attached to buddy cache inode so that
  96 * we can access them through the page cache. The information regarding
  97 * each group is loaded via ext4_mb_load_buddy.  The information involve
  98 * block bitmap and buddy information. The information are stored in the
  99 * inode as:
 100 *
 101 *  {                        page                        }
 102 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 103 *
 104 *
 105 * one block each for bitmap and buddy information.  So for each group we
 106 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 107 * blocksize) blocks.  So it can have information regarding groups_per_page
 108 * which is blocks_per_page/2
 109 *
 110 * The buddy cache inode is not stored on disk. The inode is thrown
 111 * away when the filesystem is unmounted.
 112 *
 113 * We look for count number of blocks in the buddy cache. If we were able
 114 * to locate that many free blocks we return with additional information
 115 * regarding rest of the contiguous physical block available
 116 *
 117 * Before allocating blocks via buddy cache we normalize the request
 118 * blocks. This ensure we ask for more blocks that we needed. The extra
 119 * blocks that we get after allocation is added to the respective prealloc
 120 * list. In case of inode preallocation we follow a list of heuristics
 121 * based on file size. This can be found in ext4_mb_normalize_request. If
 122 * we are doing a group prealloc we try to normalize the request to
 123 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 124 * dependent on the cluster size; for non-bigalloc file systems, it is
 125 * 512 blocks. This can be tuned via
 126 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 127 * terms of number of blocks. If we have mounted the file system with -O
 128 * stripe=<value> option the group prealloc request is normalized to the
 129 * smallest multiple of the stripe value (sbi->s_stripe) which is
 130 * greater than the default mb_group_prealloc.
 131 *
 132 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 133 * structures in two data structures:
 134 *
 135 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 136 *
 137 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 138 *
 139 *    This is an array of lists where the index in the array represents the
 140 *    largest free order in the buddy bitmap of the participating group infos of
 141 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 142 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 143 *    placed in appropriate lists.
 144 *
 145 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 146 *
 147 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 148 *
 149 *    This is an array of lists where in the i-th list there are groups with
 150 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 151 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 152 *    Note that we don't bother with a special list for completely empty groups
 153 *    so we only have MB_NUM_ORDERS(sb) lists.
 154 *
 155 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 156 * structures to decide the order in which groups are to be traversed for
 157 * fulfilling an allocation request.
 158 *
 159 * At CR_POWER2_ALIGNED , we look for groups which have the largest_free_order
 160 * >= the order of the request. We directly look at the largest free order list
 161 * in the data structure (1) above where largest_free_order = order of the
 162 * request. If that list is empty, we look at remaining list in the increasing
 163 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
 164 * lookup in O(1) time.
 165 *
 166 * At CR_GOAL_LEN_FAST, we only consider groups where
 167 * average fragment size > request size. So, we lookup a group which has average
 168 * fragment size just above or equal to request size using our average fragment
 169 * size group lists (data structure 2) in O(1) time.
 170 *
 171 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 172 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 173 * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
 174 * fragment size > goal length. So before falling to the slower
 175 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim goal length and
 176 * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big
 177 * enough average fragment size. This increases the chances of finding a
 178 * suitable block group in O(1) time and results in faster allocation at the
 179 * cost of reduced size of allocation.
 180 *
 181 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 182 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
 183 * CR_GOAL_LEN_FAST phase.
 184 *
 185 * The regular allocator (using the buddy cache) supports a few tunables.
 186 *
 187 * /sys/fs/ext4/<partition>/mb_min_to_scan
 188 * /sys/fs/ext4/<partition>/mb_max_to_scan
 189 * /sys/fs/ext4/<partition>/mb_order2_req
 190 * /sys/fs/ext4/<partition>/mb_linear_limit
 191 *
 192 * The regular allocator uses buddy scan only if the request len is power of
 193 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 194 * value of s_mb_order2_reqs can be tuned via
 195 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 196 * stripe size (sbi->s_stripe), we try to search for contiguous block in
 197 * stripe size. This should result in better allocation on RAID setups. If
 198 * not, we search in the specific group using bitmap for best extents. The
 199 * tunable min_to_scan and max_to_scan control the behaviour here.
 200 * min_to_scan indicate how long the mballoc __must__ look for a best
 201 * extent and max_to_scan indicates how long the mballoc __can__ look for a
 202 * best extent in the found extents. Searching for the blocks starts with
 203 * the group specified as the goal value in allocation context via
 204 * ac_g_ex. Each group is first checked based on the criteria whether it
 205 * can be used for allocation. ext4_mb_good_group explains how the groups are
 206 * checked.
 207 *
 208 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 209 * get traversed linearly. That may result in subsequent allocations being not
 210 * close to each other. And so, the underlying device may get filled up in a
 211 * non-linear fashion. While that may not matter on non-rotational devices, for
 212 * rotational devices that may result in higher seek times. "mb_linear_limit"
 213 * tells mballoc how many groups mballoc should search linearly before
 214 * performing consulting above data structures for more efficient lookups. For
 215 * non rotational devices, this value defaults to 0 and for rotational devices
 216 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 217 *
 218 * Both the prealloc space are getting populated as above. So for the first
 219 * request we will hit the buddy cache which will result in this prealloc
 220 * space getting filled. The prealloc space is then later used for the
 221 * subsequent request.
 222 */
 223
 224/*
 225 * mballoc operates on the following data:
 226 *  - on-disk bitmap
 227 *  - in-core buddy (actually includes buddy and bitmap)
 228 *  - preallocation descriptors (PAs)
 229 *
 230 * there are two types of preallocations:
 231 *  - inode
 232 *    assiged to specific inode and can be used for this inode only.
 233 *    it describes part of inode's space preallocated to specific
 234 *    physical blocks. any block from that preallocated can be used
 235 *    independent. the descriptor just tracks number of blocks left
 236 *    unused. so, before taking some block from descriptor, one must
 237 *    make sure corresponded logical block isn't allocated yet. this
 238 *    also means that freeing any block within descriptor's range
 239 *    must discard all preallocated blocks.
 240 *  - locality group
 241 *    assigned to specific locality group which does not translate to
 242 *    permanent set of inodes: inode can join and leave group. space
 243 *    from this type of preallocation can be used for any inode. thus
 244 *    it's consumed from the beginning to the end.
 245 *
 246 * relation between them can be expressed as:
 247 *    in-core buddy = on-disk bitmap + preallocation descriptors
 248 *
 249 * this mean blocks mballoc considers used are:
 250 *  - allocated blocks (persistent)
 251 *  - preallocated blocks (non-persistent)
 252 *
 253 * consistency in mballoc world means that at any time a block is either
 254 * free or used in ALL structures. notice: "any time" should not be read
 255 * literally -- time is discrete and delimited by locks.
 256 *
 257 *  to keep it simple, we don't use block numbers, instead we count number of
 258 *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 259 *
 260 * all operations can be expressed as:
 261 *  - init buddy:			buddy = on-disk + PAs
 262 *  - new PA:				buddy += N; PA = N
 263 *  - use inode PA:			on-disk += N; PA -= N
 264 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 265 *  - use locality group PA		on-disk += N; PA -= N
 266 *  - discard locality group PA		buddy -= PA; PA = 0
 267 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 268 *        is used in real operation because we can't know actual used
 269 *        bits from PA, only from on-disk bitmap
 270 *
 271 * if we follow this strict logic, then all operations above should be atomic.
 272 * given some of them can block, we'd have to use something like semaphores
 273 * killing performance on high-end SMP hardware. let's try to relax it using
 274 * the following knowledge:
 275 *  1) if buddy is referenced, it's already initialized
 276 *  2) while block is used in buddy and the buddy is referenced,
 277 *     nobody can re-allocate that block
 278 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 279 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 280 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
 281 *     block
 282 *
 283 * so, now we're building a concurrency table:
 284 *  - init buddy vs.
 285 *    - new PA
 286 *      blocks for PA are allocated in the buddy, buddy must be referenced
 287 *      until PA is linked to allocation group to avoid concurrent buddy init
 288 *    - use inode PA
 289 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 290 *      given (3) we care that PA-=N operation doesn't interfere with init
 291 *    - discard inode PA
 292 *      the simplest way would be to have buddy initialized by the discard
 293 *    - use locality group PA
 294 *      again PA-=N must be serialized with init
 295 *    - discard locality group PA
 296 *      the simplest way would be to have buddy initialized by the discard
 297 *  - new PA vs.
 298 *    - use inode PA
 299 *      i_data_sem serializes them
 300 *    - discard inode PA
 301 *      discard process must wait until PA isn't used by another process
 302 *    - use locality group PA
 303 *      some mutex should serialize them
 304 *    - discard locality group PA
 305 *      discard process must wait until PA isn't used by another process
 306 *  - use inode PA
 307 *    - use inode PA
 308 *      i_data_sem or another mutex should serializes them
 309 *    - discard inode PA
 310 *      discard process must wait until PA isn't used by another process
 311 *    - use locality group PA
 312 *      nothing wrong here -- they're different PAs covering different blocks
 313 *    - discard locality group PA
 314 *      discard process must wait until PA isn't used by another process
 315 *
 316 * now we're ready to make few consequences:
 317 *  - PA is referenced and while it is no discard is possible
 318 *  - PA is referenced until block isn't marked in on-disk bitmap
 319 *  - PA changes only after on-disk bitmap
 320 *  - discard must not compete with init. either init is done before
 321 *    any discard or they're serialized somehow
 322 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 323 *
 324 * a special case when we've used PA to emptiness. no need to modify buddy
 325 * in this case, but we should care about concurrent init
 326 *
 327 */
 328
 329 /*
 330 * Logic in few words:
 331 *
 332 *  - allocation:
 333 *    load group
 334 *    find blocks
 335 *    mark bits in on-disk bitmap
 336 *    release group
 337 *
 338 *  - use preallocation:
 339 *    find proper PA (per-inode or group)
 340 *    load group
 341 *    mark bits in on-disk bitmap
 342 *    release group
 343 *    release PA
 344 *
 345 *  - free:
 346 *    load group
 347 *    mark bits in on-disk bitmap
 348 *    release group
 349 *
 350 *  - discard preallocations in group:
 351 *    mark PAs deleted
 352 *    move them onto local list
 353 *    load on-disk bitmap
 354 *    load group
 355 *    remove PA from object (inode or locality group)
 356 *    mark free blocks in-core
 357 *
 358 *  - discard inode's preallocations:
 359 */
 360
 361/*
 362 * Locking rules
 363 *
 364 * Locks:
 365 *  - bitlock on a group	(group)
 366 *  - object (inode/locality)	(object)
 367 *  - per-pa lock		(pa)
 368 *  - cr_power2_aligned lists lock	(cr_power2_aligned)
 369 *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
 370 *
 371 * Paths:
 372 *  - new pa
 373 *    object
 374 *    group
 375 *
 376 *  - find and use pa:
 377 *    pa
 378 *
 379 *  - release consumed pa:
 380 *    pa
 381 *    group
 382 *    object
 383 *
 384 *  - generate in-core bitmap:
 385 *    group
 386 *        pa
 387 *
 388 *  - discard all for given object (inode, locality group):
 389 *    object
 390 *        pa
 391 *    group
 392 *
 393 *  - discard all for given group:
 394 *    group
 395 *        pa
 396 *    group
 397 *        object
 398 *
 399 *  - allocation path (ext4_mb_regular_allocator)
 400 *    group
 401 *    cr_power2_aligned/cr_goal_len_fast
 402 */
 403static struct kmem_cache *ext4_pspace_cachep;
 404static struct kmem_cache *ext4_ac_cachep;
 405static struct kmem_cache *ext4_free_data_cachep;
 406
 407/* We create slab caches for groupinfo data structures based on the
 408 * superblock block size.  There will be one per mounted filesystem for
 409 * each unique s_blocksize_bits */
 410#define NR_GRPINFO_CACHES 8
 411static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 412
 413static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
 414	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
 415	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
 416	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
 417};
 418
 419static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 420					ext4_group_t group);
 421static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
 422
 423static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 424			       ext4_group_t group, enum criteria cr);
 425
 426static int ext4_try_to_trim_range(struct super_block *sb,
 427		struct ext4_buddy *e4b, ext4_grpblk_t start,
 428		ext4_grpblk_t max, ext4_grpblk_t minblocks);
 429
 430/*
 431 * The algorithm using this percpu seq counter goes below:
 432 * 1. We sample the percpu discard_pa_seq counter before trying for block
 433 *    allocation in ext4_mb_new_blocks().
 434 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 435 *    or free these blocks i.e. while marking those blocks as used/free in
 436 *    mb_mark_used()/mb_free_blocks().
 437 * 3. We also increment this percpu seq counter when we successfully identify
 438 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 439 *    of those PAs inside ext4_mb_discard_group_preallocations().
 440 *
 441 * Now to make sure that the regular fast path of block allocation is not
 442 * affected, as a small optimization we only sample the percpu seq counter
 443 * on that cpu. Only when the block allocation fails and when freed blocks
 444 * found were 0, that is when we sample percpu seq counter for all cpus using
 445 * below function ext4_get_discard_pa_seq_sum(). This happens after making
 446 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 447 */
 448static DEFINE_PER_CPU(u64, discard_pa_seq);
 449static inline u64 ext4_get_discard_pa_seq_sum(void)
 450{
 451	int __cpu;
 452	u64 __seq = 0;
 453
 454	for_each_possible_cpu(__cpu)
 455		__seq += per_cpu(discard_pa_seq, __cpu);
 456	return __seq;
 457}
 458
 459static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 460{
 461#if BITS_PER_LONG == 64
 462	*bit += ((unsigned long) addr & 7UL) << 3;
 463	addr = (void *) ((unsigned long) addr & ~7UL);
 464#elif BITS_PER_LONG == 32
 465	*bit += ((unsigned long) addr & 3UL) << 3;
 466	addr = (void *) ((unsigned long) addr & ~3UL);
 467#else
 468#error "how many bits you are?!"
 469#endif
 470	return addr;
 471}
 472
 473static inline int mb_test_bit(int bit, void *addr)
 474{
 475	/*
 476	 * ext4_test_bit on architecture like powerpc
 477	 * needs unsigned long aligned address
 478	 */
 479	addr = mb_correct_addr_and_bit(&bit, addr);
 480	return ext4_test_bit(bit, addr);
 481}
 482
 483static inline void mb_set_bit(int bit, void *addr)
 484{
 485	addr = mb_correct_addr_and_bit(&bit, addr);
 486	ext4_set_bit(bit, addr);
 487}
 488
 489static inline void mb_clear_bit(int bit, void *addr)
 490{
 491	addr = mb_correct_addr_and_bit(&bit, addr);
 492	ext4_clear_bit(bit, addr);
 493}
 494
 495static inline int mb_test_and_clear_bit(int bit, void *addr)
 496{
 497	addr = mb_correct_addr_and_bit(&bit, addr);
 498	return ext4_test_and_clear_bit(bit, addr);
 499}
 500
 501static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 502{
 503	int fix = 0, ret, tmpmax;
 504	addr = mb_correct_addr_and_bit(&fix, addr);
 505	tmpmax = max + fix;
 506	start += fix;
 507
 508	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
 509	if (ret > max)
 510		return max;
 511	return ret;
 512}
 513
 514static inline int mb_find_next_bit(void *addr, int max, int start)
 515{
 516	int fix = 0, ret, tmpmax;
 517	addr = mb_correct_addr_and_bit(&fix, addr);
 518	tmpmax = max + fix;
 519	start += fix;
 520
 521	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
 522	if (ret > max)
 523		return max;
 524	return ret;
 525}
 526
 527static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
 528{
 529	char *bb;
 530
 531	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
 532	BUG_ON(max == NULL);
 533
 534	if (order > e4b->bd_blkbits + 1) {
 535		*max = 0;
 536		return NULL;
 537	}
 538
 539	/* at order 0 we see each particular block */
 540	if (order == 0) {
 541		*max = 1 << (e4b->bd_blkbits + 3);
 542		return e4b->bd_bitmap;
 543	}
 544
 545	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
 546	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
 547
 548	return bb;
 549}
 550
 551#ifdef DOUBLE_CHECK
 552static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 553			   int first, int count)
 554{
 555	int i;
 556	struct super_block *sb = e4b->bd_sb;
 557
 558	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 559		return;
 560	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
 561	for (i = 0; i < count; i++) {
 562		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 563			ext4_fsblk_t blocknr;
 564
 565			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 566			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
 567			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
 568					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
 569			ext4_grp_locked_error(sb, e4b->bd_group,
 570					      inode ? inode->i_ino : 0,
 571					      blocknr,
 572					      "freeing block already freed "
 573					      "(bit %u)",
 574					      first + i);
 575		}
 576		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
 577	}
 578}
 579
 580static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
 581{
 582	int i;
 583
 584	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 585		return;
 586	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
 587	for (i = 0; i < count; i++) {
 588		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
 589		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
 590	}
 591}
 592
 593static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 594{
 595	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 596		return;
 597	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
 598		unsigned char *b1, *b2;
 599		int i;
 600		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
 601		b2 = (unsigned char *) bitmap;
 602		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 603			if (b1[i] != b2[i]) {
 604				ext4_msg(e4b->bd_sb, KERN_ERR,
 605					 "corruption in group %u "
 606					 "at byte %u(%u): %x in copy != %x "
 607					 "on disk/prealloc",
 608					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
 609				BUG();
 610			}
 611		}
 612	}
 613}
 614
 615static void mb_group_bb_bitmap_alloc(struct super_block *sb,
 616			struct ext4_group_info *grp, ext4_group_t group)
 617{
 618	struct buffer_head *bh;
 619
 620	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
 621	if (!grp->bb_bitmap)
 622		return;
 623
 624	bh = ext4_read_block_bitmap(sb, group);
 625	if (IS_ERR_OR_NULL(bh)) {
 626		kfree(grp->bb_bitmap);
 627		grp->bb_bitmap = NULL;
 628		return;
 629	}
 630
 631	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
 632	put_bh(bh);
 633}
 634
 635static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 636{
 637	kfree(grp->bb_bitmap);
 638}
 639
 640#else
 641static inline void mb_free_blocks_double(struct inode *inode,
 642				struct ext4_buddy *e4b, int first, int count)
 643{
 644	return;
 645}
 646static inline void mb_mark_used_double(struct ext4_buddy *e4b,
 647						int first, int count)
 648{
 649	return;
 650}
 651static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 652{
 653	return;
 654}
 655
 656static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
 657			struct ext4_group_info *grp, ext4_group_t group)
 658{
 659	return;
 660}
 661
 662static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 663{
 664	return;
 665}
 666#endif
 667
 668#ifdef AGGRESSIVE_CHECK
 669
 670#define MB_CHECK_ASSERT(assert)						\
 671do {									\
 672	if (!(assert)) {						\
 673		printk(KERN_EMERG					\
 674			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
 675			function, file, line, # assert);		\
 676		BUG();							\
 677	}								\
 678} while (0)
 679
 680static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 681				const char *function, int line)
 682{
 683	struct super_block *sb = e4b->bd_sb;
 684	int order = e4b->bd_blkbits + 1;
 685	int max;
 686	int max2;
 687	int i;
 688	int j;
 689	int k;
 690	int count;
 691	struct ext4_group_info *grp;
 692	int fragments = 0;
 693	int fstart;
 694	struct list_head *cur;
 695	void *buddy;
 696	void *buddy2;
 697
 698	if (e4b->bd_info->bb_check_counter++ % 10)
 699		return;
 700
 701	while (order > 1) {
 702		buddy = mb_find_buddy(e4b, order, &max);
 703		MB_CHECK_ASSERT(buddy);
 704		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
 705		MB_CHECK_ASSERT(buddy2);
 706		MB_CHECK_ASSERT(buddy != buddy2);
 707		MB_CHECK_ASSERT(max * 2 == max2);
 708
 709		count = 0;
 710		for (i = 0; i < max; i++) {
 711
 712			if (mb_test_bit(i, buddy)) {
 713				/* only single bit in buddy2 may be 0 */
 714				if (!mb_test_bit(i << 1, buddy2)) {
 715					MB_CHECK_ASSERT(
 716						mb_test_bit((i<<1)+1, buddy2));
 717				}
 718				continue;
 719			}
 720
 721			/* both bits in buddy2 must be 1 */
 722			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
 723			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
 724
 725			for (j = 0; j < (1 << order); j++) {
 726				k = (i * (1 << order)) + j;
 727				MB_CHECK_ASSERT(
 728					!mb_test_bit(k, e4b->bd_bitmap));
 729			}
 730			count++;
 731		}
 732		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
 733		order--;
 734	}
 735
 736	fstart = -1;
 737	buddy = mb_find_buddy(e4b, 0, &max);
 738	for (i = 0; i < max; i++) {
 739		if (!mb_test_bit(i, buddy)) {
 740			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
 741			if (fstart == -1) {
 742				fragments++;
 743				fstart = i;
 744			}
 745			continue;
 746		}
 747		fstart = -1;
 748		/* check used bits only */
 749		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
 750			buddy2 = mb_find_buddy(e4b, j, &max2);
 751			k = i >> j;
 752			MB_CHECK_ASSERT(k < max2);
 753			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
 754		}
 755	}
 756	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
 757	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 758
 759	grp = ext4_get_group_info(sb, e4b->bd_group);
 760	if (!grp)
 761		return;
 762	list_for_each(cur, &grp->bb_prealloc_list) {
 763		ext4_group_t groupnr;
 764		struct ext4_prealloc_space *pa;
 765		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
 766		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
 767		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
 768		for (i = 0; i < pa->pa_len; i++)
 769			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
 770	}
 771}
 772#undef MB_CHECK_ASSERT
 773#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
 774					__FILE__, __func__, __LINE__)
 775#else
 776#define mb_check_buddy(e4b)
 777#endif
 778
 779/*
 780 * Divide blocks started from @first with length @len into
 781 * smaller chunks with power of 2 blocks.
 782 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 783 * then increase bb_counters[] for corresponded chunk size.
 784 */
 785static void ext4_mb_mark_free_simple(struct super_block *sb,
 786				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
 787					struct ext4_group_info *grp)
 788{
 789	struct ext4_sb_info *sbi = EXT4_SB(sb);
 790	ext4_grpblk_t min;
 791	ext4_grpblk_t max;
 792	ext4_grpblk_t chunk;
 793	unsigned int border;
 794
 795	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
 796
 797	border = 2 << sb->s_blocksize_bits;
 798
 799	while (len > 0) {
 800		/* find how many blocks can be covered since this position */
 801		max = ffs(first | border) - 1;
 802
 803		/* find how many blocks of power 2 we need to mark */
 804		min = fls(len) - 1;
 805
 806		if (max < min)
 807			min = max;
 808		chunk = 1 << min;
 809
 810		/* mark multiblock chunks only */
 811		grp->bb_counters[min]++;
 812		if (min > 0)
 813			mb_clear_bit(first >> min,
 814				     buddy + sbi->s_mb_offsets[min]);
 815
 816		len -= chunk;
 817		first += chunk;
 818	}
 819}
 820
 821static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
 822{
 823	int order;
 824
 825	/*
 826	 * We don't bother with a special lists groups with only 1 block free
 827	 * extents and for completely empty groups.
 828	 */
 829	order = fls(len) - 2;
 830	if (order < 0)
 831		return 0;
 832	if (order == MB_NUM_ORDERS(sb))
 833		order--;
 834	if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
 835		order = MB_NUM_ORDERS(sb) - 1;
 836	return order;
 837}
 838
 839/* Move group to appropriate avg_fragment_size list */
 840static void
 841mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 842{
 843	struct ext4_sb_info *sbi = EXT4_SB(sb);
 844	int new_order;
 845
 846	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
 847		return;
 848
 849	new_order = mb_avg_fragment_size_order(sb,
 850					grp->bb_free / grp->bb_fragments);
 851	if (new_order == grp->bb_avg_fragment_size_order)
 852		return;
 853
 854	if (grp->bb_avg_fragment_size_order != -1) {
 855		write_lock(&sbi->s_mb_avg_fragment_size_locks[
 856					grp->bb_avg_fragment_size_order]);
 857		list_del(&grp->bb_avg_fragment_size_node);
 858		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 859					grp->bb_avg_fragment_size_order]);
 860	}
 861	grp->bb_avg_fragment_size_order = new_order;
 862	write_lock(&sbi->s_mb_avg_fragment_size_locks[
 863					grp->bb_avg_fragment_size_order]);
 864	list_add_tail(&grp->bb_avg_fragment_size_node,
 865		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
 866	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 867					grp->bb_avg_fragment_size_order]);
 868}
 869
 870/*
 871 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 872 * cr level needs an update.
 873 */
 874static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
 875			enum criteria *new_cr, ext4_group_t *group)
 876{
 877	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 878	struct ext4_group_info *iter;
 879	int i;
 880
 881	if (ac->ac_status == AC_STATUS_FOUND)
 882		return;
 883
 884	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
 885		atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
 886
 887	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 888		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
 889			continue;
 890		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
 891		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
 892			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 893			continue;
 894		}
 895		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
 896				    bb_largest_free_order_node) {
 897			if (sbi->s_mb_stats)
 898				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
 899			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
 900				*group = iter->bb_group;
 901				ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
 902				read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 903				return;
 904			}
 905		}
 906		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 907	}
 908
 909	/* Increment cr and search again if no group is found */
 910	*new_cr = CR_GOAL_LEN_FAST;
 911}
 912
 913/*
 914 * Find a suitable group of given order from the average fragments list.
 915 */
 916static struct ext4_group_info *
 917ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
 918{
 919	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 920	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
 921	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
 922	struct ext4_group_info *grp = NULL, *iter;
 923	enum criteria cr = ac->ac_criteria;
 924
 925	if (list_empty(frag_list))
 926		return NULL;
 927	read_lock(frag_list_lock);
 928	if (list_empty(frag_list)) {
 929		read_unlock(frag_list_lock);
 930		return NULL;
 931	}
 932	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
 933		if (sbi->s_mb_stats)
 934			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
 935		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
 936			grp = iter;
 937			break;
 938		}
 939	}
 940	read_unlock(frag_list_lock);
 941	return grp;
 942}
 943
 944/*
 945 * Choose next group by traversing average fragment size list of suitable
 946 * order. Updates *new_cr if cr level needs an update.
 947 */
 948static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
 949		enum criteria *new_cr, ext4_group_t *group)
 950{
 951	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 952	struct ext4_group_info *grp = NULL;
 953	int i;
 954
 955	if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
 956		if (sbi->s_mb_stats)
 957			atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
 958	}
 959
 960	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
 961	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 962		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
 963		if (grp) {
 964			*group = grp->bb_group;
 965			ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
 966			return;
 967		}
 968	}
 969
 970	/*
 971	 * CR_BEST_AVAIL_LEN works based on the concept that we have
 972	 * a larger normalized goal len request which can be trimmed to
 973	 * a smaller goal len such that it can still satisfy original
 974	 * request len. However, allocation request for non-regular
 975	 * files never gets normalized.
 976	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
 977	 */
 978	if (ac->ac_flags & EXT4_MB_HINT_DATA)
 979		*new_cr = CR_BEST_AVAIL_LEN;
 980	else
 981		*new_cr = CR_GOAL_LEN_SLOW;
 982}
 983
 984/*
 985 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
 986 * order we have and proactively trim the goal request length to that order to
 987 * find a suitable group faster.
 988 *
 989 * This optimizes allocation speed at the cost of slightly reduced
 990 * preallocations. However, we make sure that we don't trim the request too
 991 * much and fall to CR_GOAL_LEN_SLOW in that case.
 992 */
 993static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
 994		enum criteria *new_cr, ext4_group_t *group)
 995{
 996	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 997	struct ext4_group_info *grp = NULL;
 998	int i, order, min_order;
 999	unsigned long num_stripe_clusters = 0;
1000
1001	if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
1002		if (sbi->s_mb_stats)
1003			atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
1004	}
1005
1006	/*
1007	 * mb_avg_fragment_size_order() returns order in a way that makes
1008	 * retrieving back the length using (1 << order) inaccurate. Hence, use
1009	 * fls() instead since we need to know the actual length while modifying
1010	 * goal length.
1011	 */
1012	order = fls(ac->ac_g_ex.fe_len) - 1;
1013	if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
1014		order = MB_NUM_ORDERS(ac->ac_sb);
1015	min_order = order - sbi->s_mb_best_avail_max_trim_order;
1016	if (min_order < 0)
1017		min_order = 0;
1018
1019	if (sbi->s_stripe > 0) {
1020		/*
1021		 * We are assuming that stripe size is always a multiple of
1022		 * cluster ratio otherwise __ext4_fill_super exists early.
1023		 */
1024		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
1025		if (1 << min_order < num_stripe_clusters)
1026			/*
1027			 * We consider 1 order less because later we round
1028			 * up the goal len to num_stripe_clusters
1029			 */
1030			min_order = fls(num_stripe_clusters) - 1;
1031	}
1032
1033	if (1 << min_order < ac->ac_o_ex.fe_len)
1034		min_order = fls(ac->ac_o_ex.fe_len);
1035
1036	for (i = order; i >= min_order; i--) {
1037		int frag_order;
1038		/*
1039		 * Scale down goal len to make sure we find something
1040		 * in the free fragments list. Basically, reduce
1041		 * preallocations.
1042		 */
1043		ac->ac_g_ex.fe_len = 1 << i;
1044
1045		if (num_stripe_clusters > 0) {
1046			/*
1047			 * Try to round up the adjusted goal length to
1048			 * stripe size (in cluster units) multiple for
1049			 * efficiency.
1050			 */
1051			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
1052						     num_stripe_clusters);
1053		}
1054
1055		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
1056							ac->ac_g_ex.fe_len);
1057
1058		grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
1059		if (grp) {
1060			*group = grp->bb_group;
1061			ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
1062			return;
1063		}
1064	}
1065
1066	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
1067	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
1068	*new_cr = CR_GOAL_LEN_SLOW;
1069}
1070
1071static inline int should_optimize_scan(struct ext4_allocation_context *ac)
1072{
1073	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1074		return 0;
1075	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
1076		return 0;
1077	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1078		return 0;
1079	return 1;
1080}
1081
1082/*
1083 * Return next linear group for allocation.
 
1084 */
1085static ext4_group_t
1086next_linear_group(ext4_group_t group, ext4_group_t ngroups)
 
1087{
 
 
 
 
 
 
 
 
 
 
1088	/*
1089	 * Artificially restricted ngroups for non-extent
1090	 * files makes group > ngroups possible on first loop.
1091	 */
1092	return group + 1 >= ngroups ? 0 : group + 1;
1093}
1094
1095/*
1096 * ext4_mb_choose_next_group: choose next group for allocation.
1097 *
1098 * @ac        Allocation Context
1099 * @new_cr    This is an output parameter. If the there is no good group
1100 *            available at current CR level, this field is updated to indicate
1101 *            the new cr level that should be used.
1102 * @group     This is an input / output parameter. As an input it indicates the
1103 *            next group that the allocator intends to use for allocation. As
1104 *            output, this field indicates the next group that should be used as
1105 *            determined by the optimization functions.
1106 * @ngroups   Total number of groups
1107 */
1108static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1109		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1110{
1111	*new_cr = ac->ac_criteria;
1112
1113	if (!should_optimize_scan(ac)) {
1114		*group = next_linear_group(*group, ngroups);
1115		return;
1116	}
1117
1118	/*
1119	 * Optimized scanning can return non adjacent groups which can cause
1120	 * seek overhead for rotational disks. So try few linear groups before
1121	 * trying optimized scan.
1122	 */
1123	if (ac->ac_groups_linear_remaining) {
1124		*group = next_linear_group(*group, ngroups);
1125		ac->ac_groups_linear_remaining--;
1126		return;
1127	}
1128
1129	if (*new_cr == CR_POWER2_ALIGNED) {
1130		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
1131	} else if (*new_cr == CR_GOAL_LEN_FAST) {
1132		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
1133	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
1134		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
1135	} else {
1136		/*
1137		 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
1138		 * rb tree sorted by bb_free. But until that happens, we should
1139		 * never come here.
1140		 */
1141		WARN_ON(1);
1142	}
1143}
1144
1145/*
1146 * Cache the order of the largest free extent we have available in this block
1147 * group.
1148 */
1149static void
1150mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1151{
1152	struct ext4_sb_info *sbi = EXT4_SB(sb);
1153	int i;
1154
1155	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1156		if (grp->bb_counters[i] > 0)
1157			break;
1158	/* No need to move between order lists? */
1159	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1160	    i == grp->bb_largest_free_order) {
1161		grp->bb_largest_free_order = i;
1162		return;
1163	}
1164
1165	if (grp->bb_largest_free_order >= 0) {
1166		write_lock(&sbi->s_mb_largest_free_orders_locks[
1167					      grp->bb_largest_free_order]);
1168		list_del_init(&grp->bb_largest_free_order_node);
1169		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1170					      grp->bb_largest_free_order]);
1171	}
1172	grp->bb_largest_free_order = i;
1173	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1174		write_lock(&sbi->s_mb_largest_free_orders_locks[
1175					      grp->bb_largest_free_order]);
1176		list_add_tail(&grp->bb_largest_free_order_node,
1177		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1178		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1179					      grp->bb_largest_free_order]);
1180	}
1181}
1182
1183static noinline_for_stack
1184void ext4_mb_generate_buddy(struct super_block *sb,
1185			    void *buddy, void *bitmap, ext4_group_t group,
1186			    struct ext4_group_info *grp)
1187{
1188	struct ext4_sb_info *sbi = EXT4_SB(sb);
1189	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1190	ext4_grpblk_t i = 0;
1191	ext4_grpblk_t first;
1192	ext4_grpblk_t len;
1193	unsigned free = 0;
1194	unsigned fragments = 0;
1195	unsigned long long period = get_cycles();
1196
1197	/* initialize buddy from bitmap which is aggregation
1198	 * of on-disk bitmap and preallocations */
1199	i = mb_find_next_zero_bit(bitmap, max, 0);
1200	grp->bb_first_free = i;
1201	while (i < max) {
1202		fragments++;
1203		first = i;
1204		i = mb_find_next_bit(bitmap, max, i);
1205		len = i - first;
1206		free += len;
1207		if (len > 1)
1208			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1209		else
1210			grp->bb_counters[0]++;
1211		if (i < max)
1212			i = mb_find_next_zero_bit(bitmap, max, i);
1213	}
1214	grp->bb_fragments = fragments;
1215
1216	if (free != grp->bb_free) {
1217		ext4_grp_locked_error(sb, group, 0, 0,
1218				      "block bitmap and bg descriptor "
1219				      "inconsistent: %u vs %u free clusters",
1220				      free, grp->bb_free);
1221		/*
1222		 * If we intend to continue, we consider group descriptor
1223		 * corrupt and update bb_free using bitmap value
1224		 */
1225		grp->bb_free = free;
1226		ext4_mark_group_bitmap_corrupted(sb, group,
1227					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1228	}
1229	mb_set_largest_free_order(sb, grp);
1230	mb_update_avg_fragment_size(sb, grp);
1231
1232	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1233
1234	period = get_cycles() - period;
1235	atomic_inc(&sbi->s_mb_buddies_generated);
1236	atomic64_add(period, &sbi->s_mb_generation_time);
1237}
1238
1239static void mb_regenerate_buddy(struct ext4_buddy *e4b)
1240{
1241	int count;
1242	int order = 1;
1243	void *buddy;
1244
1245	while ((buddy = mb_find_buddy(e4b, order++, &count)))
1246		mb_set_bits(buddy, 0, count);
1247
1248	e4b->bd_info->bb_fragments = 0;
1249	memset(e4b->bd_info->bb_counters, 0,
1250		sizeof(*e4b->bd_info->bb_counters) *
1251		(e4b->bd_sb->s_blocksize_bits + 2));
1252
1253	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
1254		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
1255}
1256
1257/* The buddy information is attached the buddy cache inode
1258 * for convenience. The information regarding each group
1259 * is loaded via ext4_mb_load_buddy. The information involve
1260 * block bitmap and buddy information. The information are
1261 * stored in the inode as
1262 *
1263 * {                        page                        }
1264 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1265 *
1266 *
1267 * one block each for bitmap and buddy information.
1268 * So for each group we take up 2 blocks. A page can
1269 * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
1270 * So it can have information regarding groups_per_page which
1271 * is blocks_per_page/2
1272 *
1273 * Locking note:  This routine takes the block group lock of all groups
1274 * for this page; do not hold this lock when calling this routine!
1275 */
1276
1277static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
1278{
1279	ext4_group_t ngroups;
1280	unsigned int blocksize;
1281	int blocks_per_page;
1282	int groups_per_page;
1283	int err = 0;
1284	int i;
1285	ext4_group_t first_group, group;
1286	int first_block;
1287	struct super_block *sb;
1288	struct buffer_head *bhs;
1289	struct buffer_head **bh = NULL;
1290	struct inode *inode;
1291	char *data;
1292	char *bitmap;
1293	struct ext4_group_info *grinfo;
1294
1295	inode = folio->mapping->host;
1296	sb = inode->i_sb;
1297	ngroups = ext4_get_groups_count(sb);
1298	blocksize = i_blocksize(inode);
1299	blocks_per_page = PAGE_SIZE / blocksize;
1300
1301	mb_debug(sb, "init folio %lu\n", folio->index);
1302
1303	groups_per_page = blocks_per_page >> 1;
1304	if (groups_per_page == 0)
1305		groups_per_page = 1;
1306
1307	/* allocate buffer_heads to read bitmaps */
1308	if (groups_per_page > 1) {
1309		i = sizeof(struct buffer_head *) * groups_per_page;
1310		bh = kzalloc(i, gfp);
1311		if (bh == NULL)
1312			return -ENOMEM;
1313	} else
1314		bh = &bhs;
1315
1316	first_group = folio->index * blocks_per_page / 2;
1317
1318	/* read all groups the folio covers into the cache */
1319	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1320		if (group >= ngroups)
1321			break;
1322
1323		grinfo = ext4_get_group_info(sb, group);
1324		if (!grinfo)
1325			continue;
1326		/*
1327		 * If page is uptodate then we came here after online resize
1328		 * which added some new uninitialized group info structs, so
1329		 * we must skip all initialized uptodate buddies on the folio,
1330		 * which may be currently in use by an allocating task.
1331		 */
1332		if (folio_test_uptodate(folio) &&
1333				!EXT4_MB_GRP_NEED_INIT(grinfo)) {
1334			bh[i] = NULL;
1335			continue;
1336		}
1337		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1338		if (IS_ERR(bh[i])) {
1339			err = PTR_ERR(bh[i]);
1340			bh[i] = NULL;
1341			goto out;
1342		}
1343		mb_debug(sb, "read bitmap for group %u\n", group);
1344	}
1345
1346	/* wait for I/O completion */
1347	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1348		int err2;
1349
1350		if (!bh[i])
1351			continue;
1352		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1353		if (!err)
1354			err = err2;
1355	}
1356
1357	first_block = folio->index * blocks_per_page;
1358	for (i = 0; i < blocks_per_page; i++) {
1359		group = (first_block + i) >> 1;
1360		if (group >= ngroups)
1361			break;
1362
1363		if (!bh[group - first_group])
1364			/* skip initialized uptodate buddy */
1365			continue;
1366
1367		if (!buffer_verified(bh[group - first_group]))
1368			/* Skip faulty bitmaps */
1369			continue;
1370		err = 0;
1371
1372		/*
1373		 * data carry information regarding this
1374		 * particular group in the format specified
1375		 * above
1376		 *
1377		 */
1378		data = folio_address(folio) + (i * blocksize);
1379		bitmap = bh[group - first_group]->b_data;
1380
1381		/*
1382		 * We place the buddy block and bitmap block
1383		 * close together
1384		 */
1385		grinfo = ext4_get_group_info(sb, group);
1386		if (!grinfo) {
1387			err = -EFSCORRUPTED;
1388		        goto out;
1389		}
1390		if ((first_block + i) & 1) {
1391			/* this is block of buddy */
1392			BUG_ON(incore == NULL);
1393			mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
1394				group, folio->index, i * blocksize);
1395			trace_ext4_mb_buddy_bitmap_load(sb, group);
1396			grinfo->bb_fragments = 0;
1397			memset(grinfo->bb_counters, 0,
1398			       sizeof(*grinfo->bb_counters) *
1399			       (MB_NUM_ORDERS(sb)));
1400			/*
1401			 * incore got set to the group block bitmap below
1402			 */
1403			ext4_lock_group(sb, group);
1404			/* init the buddy */
1405			memset(data, 0xff, blocksize);
1406			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1407			ext4_unlock_group(sb, group);
1408			incore = NULL;
1409		} else {
1410			/* this is block of bitmap */
1411			BUG_ON(incore != NULL);
1412			mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
1413				group, folio->index, i * blocksize);
1414			trace_ext4_mb_bitmap_load(sb, group);
1415
1416			/* see comments in ext4_mb_put_pa() */
1417			ext4_lock_group(sb, group);
1418			memcpy(data, bitmap, blocksize);
1419
1420			/* mark all preallocated blks used in in-core bitmap */
1421			ext4_mb_generate_from_pa(sb, data, group);
1422			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
1423			ext4_unlock_group(sb, group);
1424
1425			/* set incore so that the buddy information can be
1426			 * generated using this
1427			 */
1428			incore = data;
1429		}
1430	}
1431	folio_mark_uptodate(folio);
1432
1433out:
1434	if (bh) {
1435		for (i = 0; i < groups_per_page; i++)
1436			brelse(bh[i]);
1437		if (bh != &bhs)
1438			kfree(bh);
1439	}
1440	return err;
1441}
1442
1443/*
1444 * Lock the buddy and bitmap pages. This make sure other parallel init_group
1445 * on the same buddy page doesn't happen whild holding the buddy page lock.
1446 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1447 * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
1448 */
1449static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1450		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1451{
1452	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1453	int block, pnum, poff;
1454	int blocks_per_page;
1455	struct folio *folio;
1456
1457	e4b->bd_buddy_folio = NULL;
1458	e4b->bd_bitmap_folio = NULL;
1459
1460	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1461	/*
1462	 * the buddy cache inode stores the block bitmap
1463	 * and buddy information in consecutive blocks.
1464	 * So for each group we need two blocks.
1465	 */
1466	block = group * 2;
1467	pnum = block / blocks_per_page;
1468	poff = block % blocks_per_page;
1469	folio = __filemap_get_folio(inode->i_mapping, pnum,
1470			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1471	if (IS_ERR(folio))
1472		return PTR_ERR(folio);
1473	BUG_ON(folio->mapping != inode->i_mapping);
1474	e4b->bd_bitmap_folio = folio;
1475	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
1476
1477	if (blocks_per_page >= 2) {
1478		/* buddy and bitmap are on the same page */
1479		return 0;
1480	}
1481
1482	/* blocks_per_page == 1, hence we need another page for the buddy */
1483	folio = __filemap_get_folio(inode->i_mapping, block + 1,
1484			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1485	if (IS_ERR(folio))
1486		return PTR_ERR(folio);
1487	BUG_ON(folio->mapping != inode->i_mapping);
1488	e4b->bd_buddy_folio = folio;
1489	return 0;
1490}
1491
1492static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1493{
1494	if (e4b->bd_bitmap_folio) {
1495		folio_unlock(e4b->bd_bitmap_folio);
1496		folio_put(e4b->bd_bitmap_folio);
1497	}
1498	if (e4b->bd_buddy_folio) {
1499		folio_unlock(e4b->bd_buddy_folio);
1500		folio_put(e4b->bd_buddy_folio);
1501	}
1502}
1503
1504/*
1505 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1506 * block group lock of all groups for this page; do not hold the BG lock when
1507 * calling this routine!
1508 */
1509static noinline_for_stack
1510int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1511{
1512
1513	struct ext4_group_info *this_grp;
1514	struct ext4_buddy e4b;
1515	struct folio *folio;
1516	int ret = 0;
1517
1518	might_sleep();
1519	mb_debug(sb, "init group %u\n", group);
1520	this_grp = ext4_get_group_info(sb, group);
1521	if (!this_grp)
1522		return -EFSCORRUPTED;
1523
1524	/*
1525	 * This ensures that we don't reinit the buddy cache
1526	 * page which map to the group from which we are already
1527	 * allocating. If we are looking at the buddy cache we would
1528	 * have taken a reference using ext4_mb_load_buddy and that
1529	 * would have pinned buddy page to page cache.
1530	 * The call to ext4_mb_get_buddy_page_lock will mark the
1531	 * page accessed.
1532	 */
1533	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1534	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1535		/*
1536		 * somebody initialized the group
1537		 * return without doing anything
1538		 */
1539		goto err;
1540	}
1541
1542	folio = e4b.bd_bitmap_folio;
1543	ret = ext4_mb_init_cache(folio, NULL, gfp);
1544	if (ret)
1545		goto err;
1546	if (!folio_test_uptodate(folio)) {
1547		ret = -EIO;
1548		goto err;
1549	}
1550
1551	if (e4b.bd_buddy_folio == NULL) {
1552		/*
1553		 * If both the bitmap and buddy are in
1554		 * the same page we don't need to force
1555		 * init the buddy
1556		 */
1557		ret = 0;
1558		goto err;
1559	}
1560	/* init buddy cache */
1561	folio = e4b.bd_buddy_folio;
1562	ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
1563	if (ret)
1564		goto err;
1565	if (!folio_test_uptodate(folio)) {
1566		ret = -EIO;
1567		goto err;
1568	}
1569err:
1570	ext4_mb_put_buddy_page_lock(&e4b);
1571	return ret;
1572}
1573
1574/*
1575 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1576 * block group lock of all groups for this page; do not hold the BG lock when
1577 * calling this routine!
1578 */
1579static noinline_for_stack int
1580ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1581		       struct ext4_buddy *e4b, gfp_t gfp)
1582{
1583	int blocks_per_page;
1584	int block;
1585	int pnum;
1586	int poff;
1587	struct folio *folio;
1588	int ret;
1589	struct ext4_group_info *grp;
1590	struct ext4_sb_info *sbi = EXT4_SB(sb);
1591	struct inode *inode = sbi->s_buddy_cache;
1592
1593	might_sleep();
1594	mb_debug(sb, "load group %u\n", group);
1595
1596	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1597	grp = ext4_get_group_info(sb, group);
1598	if (!grp)
1599		return -EFSCORRUPTED;
1600
1601	e4b->bd_blkbits = sb->s_blocksize_bits;
1602	e4b->bd_info = grp;
1603	e4b->bd_sb = sb;
1604	e4b->bd_group = group;
1605	e4b->bd_buddy_folio = NULL;
1606	e4b->bd_bitmap_folio = NULL;
1607
1608	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1609		/*
1610		 * we need full data about the group
1611		 * to make a good selection
1612		 */
1613		ret = ext4_mb_init_group(sb, group, gfp);
1614		if (ret)
1615			return ret;
1616	}
1617
1618	/*
1619	 * the buddy cache inode stores the block bitmap
1620	 * and buddy information in consecutive blocks.
1621	 * So for each group we need two blocks.
1622	 */
1623	block = group * 2;
1624	pnum = block / blocks_per_page;
1625	poff = block % blocks_per_page;
1626
1627	/* Avoid locking the folio in the fast path ... */
1628	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
1629	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
1630		if (!IS_ERR(folio))
 
1631			/*
1632			 * drop the folio reference and try
1633			 * to get the folio with lock. If we
1634			 * are not uptodate that implies
1635			 * somebody just created the folio but
1636			 * is yet to initialize it. So
1637			 * wait for it to initialize.
1638			 */
1639			folio_put(folio);
1640		folio = __filemap_get_folio(inode->i_mapping, pnum,
1641				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1642		if (!IS_ERR(folio)) {
1643			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
1644	"ext4: bitmap's mapping != inode->i_mapping\n")) {
1645				/* should never happen */
1646				folio_unlock(folio);
1647				ret = -EINVAL;
1648				goto err;
1649			}
1650			if (!folio_test_uptodate(folio)) {
1651				ret = ext4_mb_init_cache(folio, NULL, gfp);
1652				if (ret) {
1653					folio_unlock(folio);
1654					goto err;
1655				}
1656				mb_cmp_bitmaps(e4b, folio_address(folio) +
1657					       (poff * sb->s_blocksize));
1658			}
1659			folio_unlock(folio);
1660		}
1661	}
1662	if (IS_ERR(folio)) {
1663		ret = PTR_ERR(folio);
1664		goto err;
1665	}
1666	if (!folio_test_uptodate(folio)) {
1667		ret = -EIO;
1668		goto err;
1669	}
1670
1671	/* Folios marked accessed already */
1672	e4b->bd_bitmap_folio = folio;
1673	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
1674
1675	block++;
1676	pnum = block / blocks_per_page;
1677	poff = block % blocks_per_page;
1678
1679	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
1680	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
1681		if (!IS_ERR(folio))
1682			folio_put(folio);
1683		folio = __filemap_get_folio(inode->i_mapping, pnum,
1684				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1685		if (!IS_ERR(folio)) {
1686			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
1687	"ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
1688				/* should never happen */
1689				folio_unlock(folio);
1690				ret = -EINVAL;
1691				goto err;
1692			}
1693			if (!folio_test_uptodate(folio)) {
1694				ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
1695							 gfp);
1696				if (ret) {
1697					folio_unlock(folio);
1698					goto err;
1699				}
1700			}
1701			folio_unlock(folio);
1702		}
1703	}
1704	if (IS_ERR(folio)) {
1705		ret = PTR_ERR(folio);
1706		goto err;
1707	}
1708	if (!folio_test_uptodate(folio)) {
1709		ret = -EIO;
1710		goto err;
1711	}
1712
1713	/* Folios marked accessed already */
1714	e4b->bd_buddy_folio = folio;
1715	e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
1716
1717	return 0;
1718
1719err:
1720	if (!IS_ERR_OR_NULL(folio))
1721		folio_put(folio);
1722	if (e4b->bd_bitmap_folio)
1723		folio_put(e4b->bd_bitmap_folio);
1724
1725	e4b->bd_buddy = NULL;
1726	e4b->bd_bitmap = NULL;
1727	return ret;
1728}
1729
1730static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1731			      struct ext4_buddy *e4b)
1732{
1733	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1734}
1735
1736static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1737{
1738	if (e4b->bd_bitmap_folio)
1739		folio_put(e4b->bd_bitmap_folio);
1740	if (e4b->bd_buddy_folio)
1741		folio_put(e4b->bd_buddy_folio);
1742}
1743
1744
1745static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1746{
1747	int order = 1, max;
1748	void *bb;
1749
1750	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1751	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1752
1753	while (order <= e4b->bd_blkbits + 1) {
1754		bb = mb_find_buddy(e4b, order, &max);
1755		if (!mb_test_bit(block >> order, bb)) {
1756			/* this block is part of buddy of order 'order' */
1757			return order;
1758		}
1759		order++;
1760	}
1761	return 0;
1762}
1763
1764static void mb_clear_bits(void *bm, int cur, int len)
1765{
1766	__u32 *addr;
1767
1768	len = cur + len;
1769	while (cur < len) {
1770		if ((cur & 31) == 0 && (len - cur) >= 32) {
1771			/* fast path: clear whole word at once */
1772			addr = bm + (cur >> 3);
1773			*addr = 0;
1774			cur += 32;
1775			continue;
1776		}
1777		mb_clear_bit(cur, bm);
1778		cur++;
1779	}
1780}
1781
1782/* clear bits in given range
1783 * will return first found zero bit if any, -1 otherwise
1784 */
1785static int mb_test_and_clear_bits(void *bm, int cur, int len)
1786{
1787	__u32 *addr;
1788	int zero_bit = -1;
1789
1790	len = cur + len;
1791	while (cur < len) {
1792		if ((cur & 31) == 0 && (len - cur) >= 32) {
1793			/* fast path: clear whole word at once */
1794			addr = bm + (cur >> 3);
1795			if (*addr != (__u32)(-1) && zero_bit == -1)
1796				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1797			*addr = 0;
1798			cur += 32;
1799			continue;
1800		}
1801		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1802			zero_bit = cur;
1803		cur++;
1804	}
1805
1806	return zero_bit;
1807}
1808
1809void mb_set_bits(void *bm, int cur, int len)
1810{
1811	__u32 *addr;
1812
1813	len = cur + len;
1814	while (cur < len) {
1815		if ((cur & 31) == 0 && (len - cur) >= 32) {
1816			/* fast path: set whole word at once */
1817			addr = bm + (cur >> 3);
1818			*addr = 0xffffffff;
1819			cur += 32;
1820			continue;
1821		}
1822		mb_set_bit(cur, bm);
1823		cur++;
1824	}
1825}
1826
1827static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1828{
1829	if (mb_test_bit(*bit + side, bitmap)) {
1830		mb_clear_bit(*bit, bitmap);
1831		(*bit) -= side;
1832		return 1;
1833	}
1834	else {
1835		(*bit) += side;
1836		mb_set_bit(*bit, bitmap);
1837		return -1;
1838	}
1839}
1840
1841static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1842{
1843	int max;
1844	int order = 1;
1845	void *buddy = mb_find_buddy(e4b, order, &max);
1846
1847	while (buddy) {
1848		void *buddy2;
1849
1850		/* Bits in range [first; last] are known to be set since
1851		 * corresponding blocks were allocated. Bits in range
1852		 * (first; last) will stay set because they form buddies on
1853		 * upper layer. We just deal with borders if they don't
1854		 * align with upper layer and then go up.
1855		 * Releasing entire group is all about clearing
1856		 * single bit of highest order buddy.
1857		 */
1858
1859		/* Example:
1860		 * ---------------------------------
1861		 * |   1   |   1   |   1   |   1   |
1862		 * ---------------------------------
1863		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1864		 * ---------------------------------
1865		 *   0   1   2   3   4   5   6   7
1866		 *      \_____________________/
1867		 *
1868		 * Neither [1] nor [6] is aligned to above layer.
1869		 * Left neighbour [0] is free, so mark it busy,
1870		 * decrease bb_counters and extend range to
1871		 * [0; 6]
1872		 * Right neighbour [7] is busy. It can't be coaleasced with [6], so
1873		 * mark [6] free, increase bb_counters and shrink range to
1874		 * [0; 5].
1875		 * Then shift range to [0; 2], go up and do the same.
1876		 */
1877
1878
1879		if (first & 1)
1880			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1881		if (!(last & 1))
1882			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1883		if (first > last)
1884			break;
1885		order++;
1886
1887		buddy2 = mb_find_buddy(e4b, order, &max);
1888		if (!buddy2) {
1889			mb_clear_bits(buddy, first, last - first + 1);
1890			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1891			break;
1892		}
1893		first >>= 1;
1894		last >>= 1;
1895		buddy = buddy2;
1896	}
1897}
1898
1899static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1900			   int first, int count)
1901{
1902	int left_is_free = 0;
1903	int right_is_free = 0;
1904	int block;
1905	int last = first + count - 1;
1906	struct super_block *sb = e4b->bd_sb;
1907
1908	if (WARN_ON(count == 0))
1909		return;
1910	BUG_ON(last >= (sb->s_blocksize << 3));
1911	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1912	/* Don't bother if the block group is corrupt. */
1913	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1914		return;
1915
1916	mb_check_buddy(e4b);
1917	mb_free_blocks_double(inode, e4b, first, count);
1918
1919	/* access memory sequentially: check left neighbour,
1920	 * clear range and then check right neighbour
1921	 */
1922	if (first != 0)
1923		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1924	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1925	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1926		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1927
1928	if (unlikely(block != -1)) {
1929		struct ext4_sb_info *sbi = EXT4_SB(sb);
1930		ext4_fsblk_t blocknr;
1931
1932		/*
1933		 * Fastcommit replay can free already freed blocks which
1934		 * corrupts allocation info. Regenerate it.
1935		 */
1936		if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1937			mb_regenerate_buddy(e4b);
1938			goto check;
1939		}
1940
1941		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1942		blocknr += EXT4_C2B(sbi, block);
1943		ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1944				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1945		ext4_grp_locked_error(sb, e4b->bd_group,
1946				      inode ? inode->i_ino : 0, blocknr,
1947				      "freeing already freed block (bit %u); block bitmap corrupt.",
1948				      block);
1949		return;
1950	}
1951
1952	this_cpu_inc(discard_pa_seq);
1953	e4b->bd_info->bb_free += count;
1954	if (first < e4b->bd_info->bb_first_free)
1955		e4b->bd_info->bb_first_free = first;
1956
1957	/* let's maintain fragments counter */
1958	if (left_is_free && right_is_free)
1959		e4b->bd_info->bb_fragments--;
1960	else if (!left_is_free && !right_is_free)
1961		e4b->bd_info->bb_fragments++;
1962
1963	/* buddy[0] == bd_bitmap is a special case, so handle
1964	 * it right away and let mb_buddy_mark_free stay free of
1965	 * zero order checks.
1966	 * Check if neighbours are to be coaleasced,
1967	 * adjust bitmap bb_counters and borders appropriately.
1968	 */
1969	if (first & 1) {
1970		first += !left_is_free;
1971		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1972	}
1973	if (!(last & 1)) {
1974		last -= !right_is_free;
1975		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1976	}
1977
1978	if (first <= last)
1979		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1980
1981	mb_set_largest_free_order(sb, e4b->bd_info);
1982	mb_update_avg_fragment_size(sb, e4b->bd_info);
1983check:
1984	mb_check_buddy(e4b);
1985}
1986
1987static int mb_find_extent(struct ext4_buddy *e4b, int block,
1988				int needed, struct ext4_free_extent *ex)
1989{
1990	int max, order, next;
1991	void *buddy;
1992
1993	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1994	BUG_ON(ex == NULL);
1995
1996	buddy = mb_find_buddy(e4b, 0, &max);
1997	BUG_ON(buddy == NULL);
1998	BUG_ON(block >= max);
1999	if (mb_test_bit(block, buddy)) {
2000		ex->fe_len = 0;
2001		ex->fe_start = 0;
2002		ex->fe_group = 0;
2003		return 0;
2004	}
2005
2006	/* find actual order */
2007	order = mb_find_order_for_block(e4b, block);
2008
2009	ex->fe_len = (1 << order) - (block & ((1 << order) - 1));
2010	ex->fe_start = block;
2011	ex->fe_group = e4b->bd_group;
2012
2013	block = block >> order;
2014
2015	while (needed > ex->fe_len &&
2016	       mb_find_buddy(e4b, order, &max)) {
2017
2018		if (block + 1 >= max)
2019			break;
2020
2021		next = (block + 1) * (1 << order);
2022		if (mb_test_bit(next, e4b->bd_bitmap))
2023			break;
2024
2025		order = mb_find_order_for_block(e4b, next);
2026
2027		block = next >> order;
2028		ex->fe_len += 1 << order;
2029	}
2030
2031	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2032		/* Should never happen! (but apparently sometimes does?!?) */
2033		WARN_ON(1);
2034		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2035			"corruption or bug in mb_find_extent "
2036			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2037			block, order, needed, ex->fe_group, ex->fe_start,
2038			ex->fe_len, ex->fe_logical);
2039		ex->fe_len = 0;
2040		ex->fe_start = 0;
2041		ex->fe_group = 0;
2042	}
2043	return ex->fe_len;
2044}
2045
2046static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2047{
2048	int ord;
2049	int mlen = 0;
2050	int max = 0;
 
2051	int start = ex->fe_start;
2052	int len = ex->fe_len;
2053	unsigned ret = 0;
2054	int len0 = len;
2055	void *buddy;
2056	int ord_start, ord_end;
2057
2058	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2059	BUG_ON(e4b->bd_group != ex->fe_group);
2060	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2061	mb_check_buddy(e4b);
2062	mb_mark_used_double(e4b, start, len);
2063
2064	this_cpu_inc(discard_pa_seq);
2065	e4b->bd_info->bb_free -= len;
2066	if (e4b->bd_info->bb_first_free == start)
2067		e4b->bd_info->bb_first_free += len;
2068
2069	/* let's maintain fragments counter */
2070	if (start != 0)
2071		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2072	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2073		max = !mb_test_bit(start + len, e4b->bd_bitmap);
2074	if (mlen && max)
2075		e4b->bd_info->bb_fragments++;
2076	else if (!mlen && !max)
2077		e4b->bd_info->bb_fragments--;
2078
2079	/* let's maintain buddy itself */
2080	while (len) {
2081		ord = mb_find_order_for_block(e4b, start);
 
2082
2083		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2084			/* the whole chunk may be allocated at once! */
2085			mlen = 1 << ord;
2086			buddy = mb_find_buddy(e4b, ord, &max);
 
 
 
2087			BUG_ON((start >> ord) >= max);
2088			mb_set_bit(start >> ord, buddy);
2089			e4b->bd_info->bb_counters[ord]--;
2090			start += mlen;
2091			len -= mlen;
2092			BUG_ON(len < 0);
2093			continue;
2094		}
2095
2096		/* store for history */
2097		if (ret == 0)
2098			ret = len | (ord << 16);
2099
 
2100		BUG_ON(ord <= 0);
2101		buddy = mb_find_buddy(e4b, ord, &max);
2102		mb_set_bit(start >> ord, buddy);
2103		e4b->bd_info->bb_counters[ord]--;
2104
2105		ord_start = (start >> ord) << ord;
2106		ord_end = ord_start + (1 << ord);
2107		/* first chunk */
2108		if (start > ord_start)
2109			ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
2110						 ord_start, start - ord_start,
2111						 e4b->bd_info);
2112
2113		/* last chunk */
2114		if (start + len < ord_end) {
2115			ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
2116						 start + len,
2117						 ord_end - (start + len),
2118						 e4b->bd_info);
2119			break;
2120		}
2121		len = start + len - ord_end;
2122		start = ord_end;
2123	}
2124	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2125
2126	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2127	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2128	mb_check_buddy(e4b);
2129
2130	return ret;
2131}
2132
2133/*
2134 * Must be called under group lock!
2135 */
2136static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2137					struct ext4_buddy *e4b)
2138{
2139	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2140	int ret;
2141
2142	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2143	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2144
2145	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2146	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2147	ret = mb_mark_used(e4b, &ac->ac_b_ex);
2148
2149	/* preallocation can change ac_b_ex, thus we store actually
2150	 * allocated blocks for history */
2151	ac->ac_f_ex = ac->ac_b_ex;
2152
2153	ac->ac_status = AC_STATUS_FOUND;
2154	ac->ac_tail = ret & 0xffff;
2155	ac->ac_buddy = ret >> 16;
2156
2157	/*
2158	 * take the page reference. We want the page to be pinned
2159	 * so that we don't get a ext4_mb_init_cache_call for this
2160	 * group until we update the bitmap. That would mean we
2161	 * double allocate blocks. The reference is dropped
2162	 * in ext4_mb_release_context
2163	 */
2164	ac->ac_bitmap_folio = e4b->bd_bitmap_folio;
2165	folio_get(ac->ac_bitmap_folio);
2166	ac->ac_buddy_folio = e4b->bd_buddy_folio;
2167	folio_get(ac->ac_buddy_folio);
2168	/* store last allocated for subsequent stream allocation */
2169	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2170		spin_lock(&sbi->s_md_lock);
2171		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2172		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2173		spin_unlock(&sbi->s_md_lock);
2174	}
2175	/*
2176	 * As we've just preallocated more space than
2177	 * user requested originally, we store allocated
2178	 * space in a special descriptor.
2179	 */
2180	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2181		ext4_mb_new_preallocation(ac);
2182
2183}
2184
2185static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2186					struct ext4_buddy *e4b,
2187					int finish_group)
2188{
2189	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2190	struct ext4_free_extent *bex = &ac->ac_b_ex;
2191	struct ext4_free_extent *gex = &ac->ac_g_ex;
2192
2193	if (ac->ac_status == AC_STATUS_FOUND)
2194		return;
2195	/*
2196	 * We don't want to scan for a whole year
2197	 */
2198	if (ac->ac_found > sbi->s_mb_max_to_scan &&
2199			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2200		ac->ac_status = AC_STATUS_BREAK;
2201		return;
2202	}
2203
2204	/*
2205	 * Haven't found good chunk so far, let's continue
2206	 */
2207	if (bex->fe_len < gex->fe_len)
2208		return;
2209
2210	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2211		ext4_mb_use_best_found(ac, e4b);
2212}
2213
2214/*
2215 * The routine checks whether found extent is good enough. If it is,
2216 * then the extent gets marked used and flag is set to the context
2217 * to stop scanning. Otherwise, the extent is compared with the
2218 * previous found extent and if new one is better, then it's stored
2219 * in the context. Later, the best found extent will be used, if
2220 * mballoc can't find good enough extent.
2221 *
2222 * The algorithm used is roughly as follows:
2223 *
2224 * * If free extent found is exactly as big as goal, then
2225 *   stop the scan and use it immediately
2226 *
2227 * * If free extent found is smaller than goal, then keep retrying
2228 *   upto a max of sbi->s_mb_max_to_scan times (default 200). After
2229 *   that stop scanning and use whatever we have.
2230 *
2231 * * If free extent found is bigger than goal, then keep retrying
2232 *   upto a max of sbi->s_mb_min_to_scan times (default 10) before
2233 *   stopping the scan and using the extent.
2234 *
2235 *
2236 * FIXME: real allocation policy is to be designed yet!
2237 */
2238static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2239					struct ext4_free_extent *ex,
2240					struct ext4_buddy *e4b)
2241{
2242	struct ext4_free_extent *bex = &ac->ac_b_ex;
2243	struct ext4_free_extent *gex = &ac->ac_g_ex;
2244
2245	BUG_ON(ex->fe_len <= 0);
2246	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2247	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2248	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2249
2250	ac->ac_found++;
2251	ac->ac_cX_found[ac->ac_criteria]++;
2252
2253	/*
2254	 * The special case - take what you catch first
2255	 */
2256	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2257		*bex = *ex;
2258		ext4_mb_use_best_found(ac, e4b);
2259		return;
2260	}
2261
2262	/*
2263	 * Let's check whether the chuck is good enough
2264	 */
2265	if (ex->fe_len == gex->fe_len) {
2266		*bex = *ex;
2267		ext4_mb_use_best_found(ac, e4b);
2268		return;
2269	}
2270
2271	/*
2272	 * If this is first found extent, just store it in the context
2273	 */
2274	if (bex->fe_len == 0) {
2275		*bex = *ex;
2276		return;
2277	}
2278
2279	/*
2280	 * If new found extent is better, store it in the context
2281	 */
2282	if (bex->fe_len < gex->fe_len) {
2283		/* if the request isn't satisfied, any found extent
2284		 * larger than previous best one is better */
2285		if (ex->fe_len > bex->fe_len)
2286			*bex = *ex;
2287	} else if (ex->fe_len > gex->fe_len) {
2288		/* if the request is satisfied, then we try to find
2289		 * an extent that still satisfy the request, but is
2290		 * smaller than previous one */
2291		if (ex->fe_len < bex->fe_len)
2292			*bex = *ex;
2293	}
2294
2295	ext4_mb_check_limits(ac, e4b, 0);
2296}
2297
2298static noinline_for_stack
2299void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2300					struct ext4_buddy *e4b)
2301{
2302	struct ext4_free_extent ex = ac->ac_b_ex;
2303	ext4_group_t group = ex.fe_group;
2304	int max;
2305	int err;
2306
2307	BUG_ON(ex.fe_len <= 0);
2308	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2309	if (err)
2310		return;
2311
2312	ext4_lock_group(ac->ac_sb, group);
2313	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2314		goto out;
2315
2316	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2317
2318	if (max > 0) {
2319		ac->ac_b_ex = ex;
2320		ext4_mb_use_best_found(ac, e4b);
2321	}
2322
2323out:
2324	ext4_unlock_group(ac->ac_sb, group);
2325	ext4_mb_unload_buddy(e4b);
2326}
2327
2328static noinline_for_stack
2329int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2330				struct ext4_buddy *e4b)
2331{
2332	ext4_group_t group = ac->ac_g_ex.fe_group;
2333	int max;
2334	int err;
2335	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2336	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2337	struct ext4_free_extent ex;
2338
2339	if (!grp)
2340		return -EFSCORRUPTED;
2341	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2342		return 0;
2343	if (grp->bb_free == 0)
2344		return 0;
2345
2346	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2347	if (err)
2348		return err;
2349
2350	ext4_lock_group(ac->ac_sb, group);
2351	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2352		goto out;
2353
2354	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2355			     ac->ac_g_ex.fe_len, &ex);
2356	ex.fe_logical = 0xDEADFA11; /* debug value */
2357
2358	if (max >= ac->ac_g_ex.fe_len &&
2359	    ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) {
2360		ext4_fsblk_t start;
2361
2362		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2363		/* use do_div to get remainder (would be 64-bit modulo) */
2364		if (do_div(start, sbi->s_stripe) == 0) {
2365			ac->ac_found++;
2366			ac->ac_b_ex = ex;
2367			ext4_mb_use_best_found(ac, e4b);
2368		}
2369	} else if (max >= ac->ac_g_ex.fe_len) {
2370		BUG_ON(ex.fe_len <= 0);
2371		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2372		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2373		ac->ac_found++;
2374		ac->ac_b_ex = ex;
2375		ext4_mb_use_best_found(ac, e4b);
2376	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2377		/* Sometimes, caller may want to merge even small
2378		 * number of blocks to an existing extent */
2379		BUG_ON(ex.fe_len <= 0);
2380		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2381		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2382		ac->ac_found++;
2383		ac->ac_b_ex = ex;
2384		ext4_mb_use_best_found(ac, e4b);
2385	}
2386out:
2387	ext4_unlock_group(ac->ac_sb, group);
2388	ext4_mb_unload_buddy(e4b);
2389
2390	return 0;
2391}
2392
2393/*
2394 * The routine scans buddy structures (not bitmap!) from given order
2395 * to max order and tries to find big enough chunk to satisfy the req
2396 */
2397static noinline_for_stack
2398void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2399					struct ext4_buddy *e4b)
2400{
2401	struct super_block *sb = ac->ac_sb;
2402	struct ext4_group_info *grp = e4b->bd_info;
2403	void *buddy;
2404	int i;
2405	int k;
2406	int max;
2407
2408	BUG_ON(ac->ac_2order <= 0);
2409	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2410		if (grp->bb_counters[i] == 0)
2411			continue;
2412
2413		buddy = mb_find_buddy(e4b, i, &max);
2414		if (WARN_RATELIMIT(buddy == NULL,
2415			 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2416			continue;
2417
2418		k = mb_find_next_zero_bit(buddy, max, 0);
2419		if (k >= max) {
2420			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2421					e4b->bd_group,
2422					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2423			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2424				"%d free clusters of order %d. But found 0",
2425				grp->bb_counters[i], i);
2426			break;
2427		}
2428		ac->ac_found++;
2429		ac->ac_cX_found[ac->ac_criteria]++;
2430
2431		ac->ac_b_ex.fe_len = 1 << i;
2432		ac->ac_b_ex.fe_start = k << i;
2433		ac->ac_b_ex.fe_group = e4b->bd_group;
2434
2435		ext4_mb_use_best_found(ac, e4b);
2436
2437		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2438
2439		if (EXT4_SB(sb)->s_mb_stats)
2440			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2441
2442		break;
2443	}
2444}
2445
2446/*
2447 * The routine scans the group and measures all found extents.
2448 * In order to optimize scanning, caller must pass number of
2449 * free blocks in the group, so the routine can know upper limit.
2450 */
2451static noinline_for_stack
2452void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2453					struct ext4_buddy *e4b)
2454{
2455	struct super_block *sb = ac->ac_sb;
2456	void *bitmap = e4b->bd_bitmap;
2457	struct ext4_free_extent ex;
2458	int i, j, freelen;
2459	int free;
2460
2461	free = e4b->bd_info->bb_free;
2462	if (WARN_ON(free <= 0))
2463		return;
2464
2465	i = e4b->bd_info->bb_first_free;
2466
2467	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2468		i = mb_find_next_zero_bit(bitmap,
2469						EXT4_CLUSTERS_PER_GROUP(sb), i);
2470		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2471			/*
2472			 * IF we have corrupt bitmap, we won't find any
2473			 * free blocks even though group info says we
2474			 * have free blocks
2475			 */
2476			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2477					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2478			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2479					"%d free clusters as per "
2480					"group info. But bitmap says 0",
2481					free);
2482			break;
2483		}
2484
2485		if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2486			/*
2487			 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2488			 * sure that this group will have a large enough
2489			 * continuous free extent, so skip over the smaller free
2490			 * extents
2491			 */
2492			j = mb_find_next_bit(bitmap,
2493						EXT4_CLUSTERS_PER_GROUP(sb), i);
2494			freelen = j - i;
2495
2496			if (freelen < ac->ac_g_ex.fe_len) {
2497				i = j;
2498				free -= freelen;
2499				continue;
2500			}
2501		}
2502
2503		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2504		if (WARN_ON(ex.fe_len <= 0))
2505			break;
2506		if (free < ex.fe_len) {
2507			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2508					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2509			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2510					"%d free clusters as per "
2511					"group info. But got %d blocks",
2512					free, ex.fe_len);
2513			/*
2514			 * The number of free blocks differs. This mostly
2515			 * indicate that the bitmap is corrupt. So exit
2516			 * without claiming the space.
2517			 */
2518			break;
2519		}
2520		ex.fe_logical = 0xDEADC0DE; /* debug value */
2521		ext4_mb_measure_extent(ac, &ex, e4b);
2522
2523		i += ex.fe_len;
2524		free -= ex.fe_len;
2525	}
2526
2527	ext4_mb_check_limits(ac, e4b, 1);
2528}
2529
2530/*
2531 * This is a special case for storages like raid5
2532 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2533 */
2534static noinline_for_stack
2535void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2536				 struct ext4_buddy *e4b)
2537{
2538	struct super_block *sb = ac->ac_sb;
2539	struct ext4_sb_info *sbi = EXT4_SB(sb);
2540	void *bitmap = e4b->bd_bitmap;
2541	struct ext4_free_extent ex;
2542	ext4_fsblk_t first_group_block;
2543	ext4_fsblk_t a;
2544	ext4_grpblk_t i, stripe;
2545	int max;
2546
2547	BUG_ON(sbi->s_stripe == 0);
2548
2549	/* find first stripe-aligned block in group */
2550	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2551
2552	a = first_group_block + sbi->s_stripe - 1;
2553	do_div(a, sbi->s_stripe);
2554	i = (a * sbi->s_stripe) - first_group_block;
2555
2556	stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
2557	i = EXT4_B2C(sbi, i);
2558	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2559		if (!mb_test_bit(i, bitmap)) {
2560			max = mb_find_extent(e4b, i, stripe, &ex);
2561			if (max >= stripe) {
2562				ac->ac_found++;
2563				ac->ac_cX_found[ac->ac_criteria]++;
2564				ex.fe_logical = 0xDEADF00D; /* debug value */
2565				ac->ac_b_ex = ex;
2566				ext4_mb_use_best_found(ac, e4b);
2567				break;
2568			}
2569		}
2570		i += stripe;
2571	}
2572}
2573
2574/*
2575 * This is also called BEFORE we load the buddy bitmap.
2576 * Returns either 1 or 0 indicating that the group is either suitable
2577 * for the allocation or not.
2578 */
2579static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2580				ext4_group_t group, enum criteria cr)
2581{
2582	ext4_grpblk_t free, fragments;
2583	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2584	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2585
2586	BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2587
2588	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2589		return false;
2590
2591	free = grp->bb_free;
2592	if (free == 0)
2593		return false;
2594
2595	fragments = grp->bb_fragments;
2596	if (fragments == 0)
2597		return false;
2598
2599	switch (cr) {
2600	case CR_POWER2_ALIGNED:
2601		BUG_ON(ac->ac_2order == 0);
2602
2603		/* Avoid using the first bg of a flexgroup for data files */
2604		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2605		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2606		    ((group % flex_size) == 0))
2607			return false;
2608
2609		if (free < ac->ac_g_ex.fe_len)
2610			return false;
2611
2612		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2613			return true;
2614
2615		if (grp->bb_largest_free_order < ac->ac_2order)
2616			return false;
2617
2618		return true;
2619	case CR_GOAL_LEN_FAST:
2620	case CR_BEST_AVAIL_LEN:
2621		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2622			return true;
2623		break;
2624	case CR_GOAL_LEN_SLOW:
2625		if (free >= ac->ac_g_ex.fe_len)
2626			return true;
2627		break;
2628	case CR_ANY_FREE:
2629		return true;
2630	default:
2631		BUG();
2632	}
2633
2634	return false;
2635}
2636
2637/*
2638 * This could return negative error code if something goes wrong
2639 * during ext4_mb_init_group(). This should not be called with
2640 * ext4_lock_group() held.
2641 *
2642 * Note: because we are conditionally operating with the group lock in
2643 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2644 * function using __acquire and __release.  This means we need to be
2645 * super careful before messing with the error path handling via "goto
2646 * out"!
2647 */
2648static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2649				     ext4_group_t group, enum criteria cr)
2650{
2651	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2652	struct super_block *sb = ac->ac_sb;
2653	struct ext4_sb_info *sbi = EXT4_SB(sb);
2654	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2655	ext4_grpblk_t free;
2656	int ret = 0;
2657
2658	if (!grp)
2659		return -EFSCORRUPTED;
2660	if (sbi->s_mb_stats)
2661		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2662	if (should_lock) {
2663		ext4_lock_group(sb, group);
2664		__release(ext4_group_lock_ptr(sb, group));
2665	}
2666	free = grp->bb_free;
2667	if (free == 0)
2668		goto out;
2669	/*
2670	 * In all criterias except CR_ANY_FREE we try to avoid groups that
2671	 * can't possibly satisfy the full goal request due to insufficient
2672	 * free blocks.
2673	 */
2674	if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2675		goto out;
2676	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2677		goto out;
2678	if (should_lock) {
2679		__acquire(ext4_group_lock_ptr(sb, group));
2680		ext4_unlock_group(sb, group);
2681	}
2682
2683	/* We only do this if the grp has never been initialized */
2684	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2685		struct ext4_group_desc *gdp =
2686			ext4_get_group_desc(sb, group, NULL);
2687		int ret;
2688
2689		/*
2690		 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2691		 * search to find large good chunks almost for free. If buddy
2692		 * data is not ready, then this optimization makes no sense. But
2693		 * we never skip the first block group in a flex_bg, since this
2694		 * gets used for metadata block allocation, and we want to make
2695		 * sure we locate metadata blocks in the first block group in
2696		 * the flex_bg if possible.
2697		 */
2698		if (!ext4_mb_cr_expensive(cr) &&
2699		    (!sbi->s_log_groups_per_flex ||
2700		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2701		    !(ext4_has_group_desc_csum(sb) &&
2702		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2703			return 0;
2704		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2705		if (ret)
2706			return ret;
2707	}
2708
2709	if (should_lock) {
2710		ext4_lock_group(sb, group);
2711		__release(ext4_group_lock_ptr(sb, group));
2712	}
2713	ret = ext4_mb_good_group(ac, group, cr);
2714out:
2715	if (should_lock) {
2716		__acquire(ext4_group_lock_ptr(sb, group));
2717		ext4_unlock_group(sb, group);
2718	}
2719	return ret;
2720}
2721
2722/*
2723 * Start prefetching @nr block bitmaps starting at @group.
2724 * Return the next group which needs to be prefetched.
2725 */
2726ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2727			      unsigned int nr, int *cnt)
2728{
2729	ext4_group_t ngroups = ext4_get_groups_count(sb);
2730	struct buffer_head *bh;
2731	struct blk_plug plug;
2732
2733	blk_start_plug(&plug);
2734	while (nr-- > 0) {
2735		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2736								  NULL);
2737		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2738
2739		/*
2740		 * Prefetch block groups with free blocks; but don't
2741		 * bother if it is marked uninitialized on disk, since
2742		 * it won't require I/O to read.  Also only try to
2743		 * prefetch once, so we avoid getblk() call, which can
2744		 * be expensive.
2745		 */
2746		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2747		    EXT4_MB_GRP_NEED_INIT(grp) &&
2748		    ext4_free_group_clusters(sb, gdp) > 0 ) {
2749			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2750			if (bh && !IS_ERR(bh)) {
2751				if (!buffer_uptodate(bh) && cnt)
2752					(*cnt)++;
2753				brelse(bh);
2754			}
2755		}
2756		if (++group >= ngroups)
2757			group = 0;
2758	}
2759	blk_finish_plug(&plug);
2760	return group;
2761}
2762
2763/*
2764 * Prefetching reads the block bitmap into the buffer cache; but we
2765 * need to make sure that the buddy bitmap in the page cache has been
2766 * initialized.  Note that ext4_mb_init_group() will block if the I/O
2767 * is not yet completed, or indeed if it was not initiated by
2768 * ext4_mb_prefetch did not start the I/O.
2769 *
2770 * TODO: We should actually kick off the buddy bitmap setup in a work
2771 * queue when the buffer I/O is completed, so that we don't block
2772 * waiting for the block allocation bitmap read to finish when
2773 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2774 */
2775void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2776			   unsigned int nr)
2777{
2778	struct ext4_group_desc *gdp;
2779	struct ext4_group_info *grp;
2780
2781	while (nr-- > 0) {
2782		if (!group)
2783			group = ext4_get_groups_count(sb);
2784		group--;
2785		gdp = ext4_get_group_desc(sb, group, NULL);
2786		grp = ext4_get_group_info(sb, group);
2787
2788		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2789		    ext4_free_group_clusters(sb, gdp) > 0) {
2790			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2791				break;
2792		}
2793	}
2794}
2795
2796static noinline_for_stack int
2797ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2798{
2799	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2800	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2801	int err = 0, first_err = 0;
2802	unsigned int nr = 0, prefetch_ios = 0;
2803	struct ext4_sb_info *sbi;
2804	struct super_block *sb;
2805	struct ext4_buddy e4b;
2806	int lost;
2807
2808	sb = ac->ac_sb;
2809	sbi = EXT4_SB(sb);
2810	ngroups = ext4_get_groups_count(sb);
2811	/* non-extent files are limited to low blocks/groups */
2812	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2813		ngroups = sbi->s_blockfile_groups;
2814
2815	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2816
2817	/* first, try the goal */
2818	err = ext4_mb_find_by_goal(ac, &e4b);
2819	if (err || ac->ac_status == AC_STATUS_FOUND)
2820		goto out;
2821
2822	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2823		goto out;
2824
2825	/*
2826	 * ac->ac_2order is set only if the fe_len is a power of 2
2827	 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
2828	 * so that we try exact allocation using buddy.
2829	 */
2830	i = fls(ac->ac_g_ex.fe_len);
2831	ac->ac_2order = 0;
2832	/*
2833	 * We search using buddy data only if the order of the request
2834	 * is greater than equal to the sbi_s_mb_order2_reqs
2835	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2836	 * We also support searching for power-of-two requests only for
2837	 * requests upto maximum buddy size we have constructed.
2838	 */
2839	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2840		if (is_power_of_2(ac->ac_g_ex.fe_len))
2841			ac->ac_2order = array_index_nospec(i - 1,
2842							   MB_NUM_ORDERS(sb));
2843	}
2844
2845	/* if stream allocation is enabled, use global goal */
2846	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2847		/* TBD: may be hot point */
2848		spin_lock(&sbi->s_md_lock);
2849		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2850		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2851		spin_unlock(&sbi->s_md_lock);
2852	}
2853
2854	/*
2855	 * Let's just scan groups to find more-less suitable blocks We
2856	 * start with CR_GOAL_LEN_FAST, unless it is power of 2
2857	 * aligned, in which case let's do that faster approach first.
2858	 */
2859	if (ac->ac_2order)
2860		cr = CR_POWER2_ALIGNED;
2861repeat:
2862	for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2863		ac->ac_criteria = cr;
2864		/*
2865		 * searching for the right group start
2866		 * from the goal value specified
2867		 */
2868		group = ac->ac_g_ex.fe_group;
2869		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2870		prefetch_grp = group;
2871		nr = 0;
2872
2873		for (i = 0, new_cr = cr; i < ngroups; i++,
2874		     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2875			int ret = 0;
2876
2877			cond_resched();
2878			if (new_cr != cr) {
2879				cr = new_cr;
2880				goto repeat;
2881			}
2882
2883			/*
2884			 * Batch reads of the block allocation bitmaps
2885			 * to get multiple READs in flight; limit
2886			 * prefetching at inexpensive CR, otherwise mballoc
2887			 * can spend a lot of time loading imperfect groups
2888			 */
2889			if ((prefetch_grp == group) &&
2890			    (ext4_mb_cr_expensive(cr) ||
2891			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2892				nr = sbi->s_mb_prefetch;
2893				if (ext4_has_feature_flex_bg(sb)) {
2894					nr = 1 << sbi->s_log_groups_per_flex;
2895					nr -= group & (nr - 1);
2896					nr = min(nr, sbi->s_mb_prefetch);
2897				}
2898				prefetch_grp = ext4_mb_prefetch(sb, group,
2899							nr, &prefetch_ios);
2900			}
2901
2902			/* This now checks without needing the buddy page */
2903			ret = ext4_mb_good_group_nolock(ac, group, cr);
2904			if (ret <= 0) {
2905				if (!first_err)
2906					first_err = ret;
2907				continue;
2908			}
2909
2910			err = ext4_mb_load_buddy(sb, group, &e4b);
2911			if (err)
2912				goto out;
2913
2914			ext4_lock_group(sb, group);
2915
2916			/*
2917			 * We need to check again after locking the
2918			 * block group
2919			 */
2920			ret = ext4_mb_good_group(ac, group, cr);
2921			if (ret == 0) {
2922				ext4_unlock_group(sb, group);
2923				ext4_mb_unload_buddy(&e4b);
2924				continue;
2925			}
2926
2927			ac->ac_groups_scanned++;
2928			if (cr == CR_POWER2_ALIGNED)
2929				ext4_mb_simple_scan_group(ac, &e4b);
2930			else {
2931				bool is_stripe_aligned =
2932					(sbi->s_stripe >=
2933					 sbi->s_cluster_ratio) &&
2934					!(ac->ac_g_ex.fe_len %
2935					  EXT4_NUM_B2C(sbi, sbi->s_stripe));
2936
2937				if ((cr == CR_GOAL_LEN_FAST ||
2938				     cr == CR_BEST_AVAIL_LEN) &&
2939				    is_stripe_aligned)
2940					ext4_mb_scan_aligned(ac, &e4b);
2941
2942				if (ac->ac_status == AC_STATUS_CONTINUE)
2943					ext4_mb_complex_scan_group(ac, &e4b);
2944			}
2945
2946			ext4_unlock_group(sb, group);
2947			ext4_mb_unload_buddy(&e4b);
2948
2949			if (ac->ac_status != AC_STATUS_CONTINUE)
2950				break;
2951		}
2952		/* Processed all groups and haven't found blocks */
2953		if (sbi->s_mb_stats && i == ngroups)
2954			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2955
2956		if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2957			/* Reset goal length to original goal length before
2958			 * falling into CR_GOAL_LEN_SLOW */
2959			ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2960	}
2961
2962	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2963	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2964		/*
2965		 * We've been searching too long. Let's try to allocate
2966		 * the best chunk we've found so far
2967		 */
2968		ext4_mb_try_best_found(ac, &e4b);
2969		if (ac->ac_status != AC_STATUS_FOUND) {
2970			/*
2971			 * Someone more lucky has already allocated it.
2972			 * The only thing we can do is just take first
2973			 * found block(s)
2974			 */
2975			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2976			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2977				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2978				 ac->ac_b_ex.fe_len, lost);
2979
2980			ac->ac_b_ex.fe_group = 0;
2981			ac->ac_b_ex.fe_start = 0;
2982			ac->ac_b_ex.fe_len = 0;
2983			ac->ac_status = AC_STATUS_CONTINUE;
2984			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2985			cr = CR_ANY_FREE;
2986			goto repeat;
2987		}
2988	}
2989
2990	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2991		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2992out:
2993	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2994		err = first_err;
2995
2996	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2997		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2998		 ac->ac_flags, cr, err);
2999
3000	if (nr)
3001		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
3002
3003	return err;
3004}
3005
3006static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
3007{
3008	struct super_block *sb = pde_data(file_inode(seq->file));
3009	ext4_group_t group;
3010
3011	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3012		return NULL;
3013	group = *pos + 1;
3014	return (void *) ((unsigned long) group);
3015}
3016
3017static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
3018{
3019	struct super_block *sb = pde_data(file_inode(seq->file));
3020	ext4_group_t group;
3021
3022	++*pos;
3023	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3024		return NULL;
3025	group = *pos + 1;
3026	return (void *) ((unsigned long) group);
3027}
3028
3029static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
3030{
3031	struct super_block *sb = pde_data(file_inode(seq->file));
3032	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
3033	int i, err;
3034	char nbuf[16];
3035	struct ext4_buddy e4b;
3036	struct ext4_group_info *grinfo;
3037	unsigned char blocksize_bits = min_t(unsigned char,
3038					     sb->s_blocksize_bits,
3039					     EXT4_MAX_BLOCK_LOG_SIZE);
3040	struct sg {
3041		struct ext4_group_info info;
3042		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3043	} sg;
3044
3045	group--;
3046	if (group == 0)
3047		seq_puts(seq, "#group: free  frags first ["
3048			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
3049			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
3050
3051	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3052		sizeof(struct ext4_group_info);
3053
3054	grinfo = ext4_get_group_info(sb, group);
3055	if (!grinfo)
3056		return 0;
3057	/* Load the group info in memory only if not already loaded. */
3058	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3059		err = ext4_mb_load_buddy(sb, group, &e4b);
3060		if (err) {
3061			seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf));
3062			return 0;
3063		}
3064		ext4_mb_unload_buddy(&e4b);
3065	}
3066
3067	/*
3068	 * We care only about free space counters in the group info and
3069	 * these are safe to access even after the buddy has been unloaded
3070	 */
3071	memcpy(&sg, grinfo, i);
3072	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3073			sg.info.bb_fragments, sg.info.bb_first_free);
3074	for (i = 0; i <= 13; i++)
3075		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3076				sg.info.bb_counters[i] : 0);
3077	seq_puts(seq, " ]");
3078	if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
3079		seq_puts(seq, " Block bitmap corrupted!");
3080	seq_putc(seq, '\n');
 
3081	return 0;
3082}
3083
3084static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3085{
3086}
3087
3088const struct seq_operations ext4_mb_seq_groups_ops = {
3089	.start  = ext4_mb_seq_groups_start,
3090	.next   = ext4_mb_seq_groups_next,
3091	.stop   = ext4_mb_seq_groups_stop,
3092	.show   = ext4_mb_seq_groups_show,
3093};
3094
3095int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3096{
3097	struct super_block *sb = seq->private;
3098	struct ext4_sb_info *sbi = EXT4_SB(sb);
3099
3100	seq_puts(seq, "mballoc:\n");
3101	if (!sbi->s_mb_stats) {
3102		seq_puts(seq, "\tmb stats collection turned off.\n");
3103		seq_puts(
3104			seq,
3105			"\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3106		return 0;
3107	}
3108	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3109	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3110
3111	seq_printf(seq, "\tgroups_scanned: %u\n",
3112		   atomic_read(&sbi->s_bal_groups_scanned));
3113
3114	/* CR_POWER2_ALIGNED stats */
3115	seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3116	seq_printf(seq, "\t\thits: %llu\n",
3117		   atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3118	seq_printf(
3119		seq, "\t\tgroups_considered: %llu\n",
3120		atomic64_read(
3121			&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3122	seq_printf(seq, "\t\textents_scanned: %u\n",
3123		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3124	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3125		   atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3126	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3127		   atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3128
3129	/* CR_GOAL_LEN_FAST stats */
3130	seq_puts(seq, "\tcr_goal_fast_stats:\n");
3131	seq_printf(seq, "\t\thits: %llu\n",
3132		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3133	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3134		   atomic64_read(
3135			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3136	seq_printf(seq, "\t\textents_scanned: %u\n",
3137		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3138	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3139		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3140	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3141		   atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3142
3143	/* CR_BEST_AVAIL_LEN stats */
3144	seq_puts(seq, "\tcr_best_avail_stats:\n");
3145	seq_printf(seq, "\t\thits: %llu\n",
3146		   atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3147	seq_printf(
3148		seq, "\t\tgroups_considered: %llu\n",
3149		atomic64_read(
3150			&sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3151	seq_printf(seq, "\t\textents_scanned: %u\n",
3152		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3153	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3154		   atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3155	seq_printf(seq, "\t\tbad_suggestions: %u\n",
3156		   atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3157
3158	/* CR_GOAL_LEN_SLOW stats */
3159	seq_puts(seq, "\tcr_goal_slow_stats:\n");
3160	seq_printf(seq, "\t\thits: %llu\n",
3161		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3162	seq_printf(seq, "\t\tgroups_considered: %llu\n",
3163		   atomic64_read(
3164			   &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3165	seq_printf(seq, "\t\textents_scanned: %u\n",
3166		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3167	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3168		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3169
3170	/* CR_ANY_FREE stats */
3171	seq_puts(seq, "\tcr_any_free_stats:\n");
3172	seq_printf(seq, "\t\thits: %llu\n",
3173		   atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3174	seq_printf(
3175		seq, "\t\tgroups_considered: %llu\n",
3176		atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3177	seq_printf(seq, "\t\textents_scanned: %u\n",
3178		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3179	seq_printf(seq, "\t\tuseless_loops: %llu\n",
3180		   atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3181
3182	/* Aggregates */
3183	seq_printf(seq, "\textents_scanned: %u\n",
3184		   atomic_read(&sbi->s_bal_ex_scanned));
3185	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3186	seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3187		   atomic_read(&sbi->s_bal_len_goals));
3188	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3189	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3190	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3191	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3192		   atomic_read(&sbi->s_mb_buddies_generated),
3193		   ext4_get_groups_count(sb));
3194	seq_printf(seq, "\tbuddies_time_used: %llu\n",
3195		   atomic64_read(&sbi->s_mb_generation_time));
3196	seq_printf(seq, "\tpreallocated: %u\n",
3197		   atomic_read(&sbi->s_mb_preallocated));
3198	seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3199	return 0;
3200}
3201
3202static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
 
3203{
3204	struct super_block *sb = pde_data(file_inode(seq->file));
3205	unsigned long position;
3206
3207	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3208		return NULL;
3209	position = *pos + 1;
3210	return (void *) ((unsigned long) position);
3211}
3212
3213static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3214{
3215	struct super_block *sb = pde_data(file_inode(seq->file));
3216	unsigned long position;
3217
3218	++*pos;
3219	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3220		return NULL;
3221	position = *pos + 1;
3222	return (void *) ((unsigned long) position);
3223}
3224
3225static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3226{
3227	struct super_block *sb = pde_data(file_inode(seq->file));
3228	struct ext4_sb_info *sbi = EXT4_SB(sb);
3229	unsigned long position = ((unsigned long) v);
3230	struct ext4_group_info *grp;
3231	unsigned int count;
3232
3233	position--;
3234	if (position >= MB_NUM_ORDERS(sb)) {
3235		position -= MB_NUM_ORDERS(sb);
3236		if (position == 0)
3237			seq_puts(seq, "avg_fragment_size_lists:\n");
3238
3239		count = 0;
3240		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3241		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3242				    bb_avg_fragment_size_node)
3243			count++;
3244		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3245		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3246					(unsigned int)position, count);
3247		return 0;
3248	}
3249
3250	if (position == 0) {
3251		seq_printf(seq, "optimize_scan: %d\n",
3252			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3253		seq_puts(seq, "max_free_order_lists:\n");
3254	}
3255	count = 0;
3256	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3257	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3258			    bb_largest_free_order_node)
3259		count++;
3260	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3261	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3262		   (unsigned int)position, count);
3263
3264	return 0;
3265}
3266
3267static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3268{
3269}
3270
3271const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3272	.start  = ext4_mb_seq_structs_summary_start,
3273	.next   = ext4_mb_seq_structs_summary_next,
3274	.stop   = ext4_mb_seq_structs_summary_stop,
3275	.show   = ext4_mb_seq_structs_summary_show,
3276};
3277
3278static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3279{
3280	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3281	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3282
3283	BUG_ON(!cachep);
3284	return cachep;
3285}
3286
3287/*
3288 * Allocate the top-level s_group_info array for the specified number
3289 * of groups
3290 */
3291int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3292{
3293	struct ext4_sb_info *sbi = EXT4_SB(sb);
3294	unsigned size;
3295	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3296
3297	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3298		EXT4_DESC_PER_BLOCK_BITS(sb);
3299	if (size <= sbi->s_group_info_size)
3300		return 0;
3301
3302	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3303	new_groupinfo = kvzalloc(size, GFP_KERNEL);
3304	if (!new_groupinfo) {
3305		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3306		return -ENOMEM;
3307	}
3308	rcu_read_lock();
3309	old_groupinfo = rcu_dereference(sbi->s_group_info);
3310	if (old_groupinfo)
3311		memcpy(new_groupinfo, old_groupinfo,
3312		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3313	rcu_read_unlock();
3314	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3315	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3316	if (old_groupinfo)
3317		ext4_kvfree_array_rcu(old_groupinfo);
3318	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3319		   sbi->s_group_info_size);
3320	return 0;
3321}
3322
3323/* Create and initialize ext4_group_info data for the given group. */
3324int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3325			  struct ext4_group_desc *desc)
3326{
3327	int i;
3328	int metalen = 0;
3329	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3330	struct ext4_sb_info *sbi = EXT4_SB(sb);
3331	struct ext4_group_info **meta_group_info;
3332	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3333
3334	/*
3335	 * First check if this group is the first of a reserved block.
3336	 * If it's true, we have to allocate a new table of pointers
3337	 * to ext4_group_info structures
3338	 */
3339	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3340		metalen = sizeof(*meta_group_info) <<
3341			EXT4_DESC_PER_BLOCK_BITS(sb);
3342		meta_group_info = kmalloc(metalen, GFP_NOFS);
3343		if (meta_group_info == NULL) {
3344			ext4_msg(sb, KERN_ERR, "can't allocate mem "
3345				 "for a buddy group");
3346			return -ENOMEM;
3347		}
3348		rcu_read_lock();
3349		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3350		rcu_read_unlock();
3351	}
3352
3353	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3354	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3355
3356	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3357	if (meta_group_info[i] == NULL) {
3358		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3359		goto exit_group_info;
3360	}
3361	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3362		&(meta_group_info[i]->bb_state));
3363
3364	/*
3365	 * initialize bb_free to be able to skip
3366	 * empty groups without initialization
3367	 */
3368	if (ext4_has_group_desc_csum(sb) &&
3369	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3370		meta_group_info[i]->bb_free =
3371			ext4_free_clusters_after_init(sb, group, desc);
3372	} else {
3373		meta_group_info[i]->bb_free =
3374			ext4_free_group_clusters(sb, desc);
3375	}
3376
3377	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3378	init_rwsem(&meta_group_info[i]->alloc_sem);
3379	meta_group_info[i]->bb_free_root = RB_ROOT;
3380	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3381	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3382	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3383	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3384	meta_group_info[i]->bb_group = group;
3385
3386	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3387	return 0;
3388
3389exit_group_info:
3390	/* If a meta_group_info table has been allocated, release it now */
3391	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3392		struct ext4_group_info ***group_info;
3393
3394		rcu_read_lock();
3395		group_info = rcu_dereference(sbi->s_group_info);
3396		kfree(group_info[idx]);
3397		group_info[idx] = NULL;
3398		rcu_read_unlock();
3399	}
3400	return -ENOMEM;
3401} /* ext4_mb_add_groupinfo */
3402
3403static int ext4_mb_init_backend(struct super_block *sb)
3404{
3405	ext4_group_t ngroups = ext4_get_groups_count(sb);
3406	ext4_group_t i;
3407	struct ext4_sb_info *sbi = EXT4_SB(sb);
3408	int err;
3409	struct ext4_group_desc *desc;
3410	struct ext4_group_info ***group_info;
3411	struct kmem_cache *cachep;
3412
3413	err = ext4_mb_alloc_groupinfo(sb, ngroups);
3414	if (err)
3415		return err;
3416
3417	sbi->s_buddy_cache = new_inode(sb);
3418	if (sbi->s_buddy_cache == NULL) {
3419		ext4_msg(sb, KERN_ERR, "can't get new inode");
3420		goto err_freesgi;
3421	}
3422	/* To avoid potentially colliding with an valid on-disk inode number,
3423	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3424	 * not in the inode hash, so it should never be found by iget(), but
3425	 * this will avoid confusion if it ever shows up during debugging. */
3426	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3427	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3428	for (i = 0; i < ngroups; i++) {
3429		cond_resched();
3430		desc = ext4_get_group_desc(sb, i, NULL);
3431		if (desc == NULL) {
3432			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3433			goto err_freebuddy;
3434		}
3435		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3436			goto err_freebuddy;
3437	}
3438
3439	if (ext4_has_feature_flex_bg(sb)) {
3440		/* a single flex group is supposed to be read by a single IO.
3441		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3442		 * unsigned integer, so the maximum shift is 32.
3443		 */
3444		if (sbi->s_es->s_log_groups_per_flex >= 32) {
3445			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3446			goto err_freebuddy;
3447		}
3448		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3449			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3450		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3451	} else {
3452		sbi->s_mb_prefetch = 32;
3453	}
3454	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3455		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3456	/*
3457	 * now many real IOs to prefetch within a single allocation at
3458	 * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related
3459	 * optimization we shouldn't try to load too many groups, at some point
3460	 * we should start to use what we've got in memory.
3461	 * with an average random access time 5ms, it'd take a second to get
3462	 * 200 groups (* N with flex_bg), so let's make this limit 4
3463	 */
3464	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3465	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3466		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3467
3468	return 0;
3469
3470err_freebuddy:
3471	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3472	while (i-- > 0) {
3473		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3474
3475		if (grp)
3476			kmem_cache_free(cachep, grp);
3477	}
3478	i = sbi->s_group_info_size;
3479	rcu_read_lock();
3480	group_info = rcu_dereference(sbi->s_group_info);
3481	while (i-- > 0)
3482		kfree(group_info[i]);
3483	rcu_read_unlock();
3484	iput(sbi->s_buddy_cache);
3485err_freesgi:
3486	rcu_read_lock();
3487	kvfree(rcu_dereference(sbi->s_group_info));
3488	rcu_read_unlock();
3489	return -ENOMEM;
3490}
3491
3492static void ext4_groupinfo_destroy_slabs(void)
3493{
3494	int i;
3495
3496	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3497		kmem_cache_destroy(ext4_groupinfo_caches[i]);
3498		ext4_groupinfo_caches[i] = NULL;
3499	}
3500}
3501
3502static int ext4_groupinfo_create_slab(size_t size)
3503{
3504	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3505	int slab_size;
3506	int blocksize_bits = order_base_2(size);
3507	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3508	struct kmem_cache *cachep;
3509
3510	if (cache_index >= NR_GRPINFO_CACHES)
3511		return -EINVAL;
3512
3513	if (unlikely(cache_index < 0))
3514		cache_index = 0;
3515
3516	mutex_lock(&ext4_grpinfo_slab_create_mutex);
3517	if (ext4_groupinfo_caches[cache_index]) {
3518		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3519		return 0;	/* Already created */
3520	}
3521
3522	slab_size = offsetof(struct ext4_group_info,
3523				bb_counters[blocksize_bits + 2]);
3524
3525	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3526					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3527					NULL);
3528
3529	ext4_groupinfo_caches[cache_index] = cachep;
3530
3531	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3532	if (!cachep) {
3533		printk(KERN_EMERG
3534		       "EXT4-fs: no memory for groupinfo slab cache\n");
3535		return -ENOMEM;
3536	}
3537
3538	return 0;
3539}
3540
3541static void ext4_discard_work(struct work_struct *work)
3542{
3543	struct ext4_sb_info *sbi = container_of(work,
3544			struct ext4_sb_info, s_discard_work);
3545	struct super_block *sb = sbi->s_sb;
3546	struct ext4_free_data *fd, *nfd;
3547	struct ext4_buddy e4b;
3548	LIST_HEAD(discard_list);
3549	ext4_group_t grp, load_grp;
3550	int err = 0;
3551
3552	spin_lock(&sbi->s_md_lock);
3553	list_splice_init(&sbi->s_discard_list, &discard_list);
3554	spin_unlock(&sbi->s_md_lock);
3555
3556	load_grp = UINT_MAX;
3557	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3558		/*
3559		 * If filesystem is umounting or no memory or suffering
3560		 * from no space, give up the discard
3561		 */
3562		if ((sb->s_flags & SB_ACTIVE) && !err &&
3563		    !atomic_read(&sbi->s_retry_alloc_pending)) {
3564			grp = fd->efd_group;
3565			if (grp != load_grp) {
3566				if (load_grp != UINT_MAX)
3567					ext4_mb_unload_buddy(&e4b);
3568
3569				err = ext4_mb_load_buddy(sb, grp, &e4b);
3570				if (err) {
3571					kmem_cache_free(ext4_free_data_cachep, fd);
3572					load_grp = UINT_MAX;
3573					continue;
3574				} else {
3575					load_grp = grp;
3576				}
3577			}
3578
3579			ext4_lock_group(sb, grp);
3580			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3581						fd->efd_start_cluster + fd->efd_count - 1, 1);
3582			ext4_unlock_group(sb, grp);
3583		}
3584		kmem_cache_free(ext4_free_data_cachep, fd);
3585	}
3586
3587	if (load_grp != UINT_MAX)
3588		ext4_mb_unload_buddy(&e4b);
3589}
3590
3591int ext4_mb_init(struct super_block *sb)
3592{
3593	struct ext4_sb_info *sbi = EXT4_SB(sb);
3594	unsigned i, j;
3595	unsigned offset, offset_incr;
3596	unsigned max;
3597	int ret;
3598
3599	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3600
3601	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3602	if (sbi->s_mb_offsets == NULL) {
3603		ret = -ENOMEM;
3604		goto out;
3605	}
3606
3607	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3608	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3609	if (sbi->s_mb_maxs == NULL) {
3610		ret = -ENOMEM;
3611		goto out;
3612	}
3613
3614	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3615	if (ret < 0)
3616		goto out;
3617
3618	/* order 0 is regular bitmap */
3619	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3620	sbi->s_mb_offsets[0] = 0;
3621
3622	i = 1;
3623	offset = 0;
3624	offset_incr = 1 << (sb->s_blocksize_bits - 1);
3625	max = sb->s_blocksize << 2;
3626	do {
3627		sbi->s_mb_offsets[i] = offset;
3628		sbi->s_mb_maxs[i] = max;
3629		offset += offset_incr;
3630		offset_incr = offset_incr >> 1;
3631		max = max >> 1;
3632		i++;
3633	} while (i < MB_NUM_ORDERS(sb));
3634
3635	sbi->s_mb_avg_fragment_size =
3636		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3637			GFP_KERNEL);
3638	if (!sbi->s_mb_avg_fragment_size) {
3639		ret = -ENOMEM;
3640		goto out;
3641	}
3642	sbi->s_mb_avg_fragment_size_locks =
3643		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3644			GFP_KERNEL);
3645	if (!sbi->s_mb_avg_fragment_size_locks) {
3646		ret = -ENOMEM;
3647		goto out;
3648	}
3649	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3650		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3651		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3652	}
3653	sbi->s_mb_largest_free_orders =
3654		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3655			GFP_KERNEL);
3656	if (!sbi->s_mb_largest_free_orders) {
3657		ret = -ENOMEM;
3658		goto out;
3659	}
3660	sbi->s_mb_largest_free_orders_locks =
3661		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3662			GFP_KERNEL);
3663	if (!sbi->s_mb_largest_free_orders_locks) {
3664		ret = -ENOMEM;
3665		goto out;
3666	}
3667	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3668		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3669		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3670	}
3671
3672	spin_lock_init(&sbi->s_md_lock);
3673	sbi->s_mb_free_pending = 0;
3674	INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3675	INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3676	INIT_LIST_HEAD(&sbi->s_discard_list);
3677	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3678	atomic_set(&sbi->s_retry_alloc_pending, 0);
3679
3680	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3681	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3682	sbi->s_mb_stats = MB_DEFAULT_STATS;
3683	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3684	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3685	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3686
3687	/*
3688	 * The default group preallocation is 512, which for 4k block
3689	 * sizes translates to 2 megabytes.  However for bigalloc file
3690	 * systems, this is probably too big (i.e, if the cluster size
3691	 * is 1 megabyte, then group preallocation size becomes half a
3692	 * gigabyte!).  As a default, we will keep a two megabyte
3693	 * group pralloc size for cluster sizes up to 64k, and after
3694	 * that, we will force a minimum group preallocation size of
3695	 * 32 clusters.  This translates to 8 megs when the cluster
3696	 * size is 256k, and 32 megs when the cluster size is 1 meg,
3697	 * which seems reasonable as a default.
3698	 */
3699	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3700				       sbi->s_cluster_bits, 32);
3701	/*
3702	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3703	 * to the lowest multiple of s_stripe which is bigger than
3704	 * the s_mb_group_prealloc as determined above. We want
3705	 * the preallocation size to be an exact multiple of the
3706	 * RAID stripe size so that preallocations don't fragment
3707	 * the stripes.
3708	 */
3709	if (sbi->s_stripe > 1) {
3710		sbi->s_mb_group_prealloc = roundup(
3711			sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
3712	}
3713
3714	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3715	if (sbi->s_locality_groups == NULL) {
3716		ret = -ENOMEM;
3717		goto out;
3718	}
3719	for_each_possible_cpu(i) {
3720		struct ext4_locality_group *lg;
3721		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3722		mutex_init(&lg->lg_mutex);
3723		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3724			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3725		spin_lock_init(&lg->lg_prealloc_lock);
3726	}
3727
3728	if (bdev_nonrot(sb->s_bdev))
3729		sbi->s_mb_max_linear_groups = 0;
3730	else
3731		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3732	/* init file for buddy data */
3733	ret = ext4_mb_init_backend(sb);
3734	if (ret != 0)
3735		goto out_free_locality_groups;
3736
3737	return 0;
3738
3739out_free_locality_groups:
3740	free_percpu(sbi->s_locality_groups);
3741	sbi->s_locality_groups = NULL;
3742out:
3743	kfree(sbi->s_mb_avg_fragment_size);
3744	kfree(sbi->s_mb_avg_fragment_size_locks);
3745	kfree(sbi->s_mb_largest_free_orders);
3746	kfree(sbi->s_mb_largest_free_orders_locks);
3747	kfree(sbi->s_mb_offsets);
3748	sbi->s_mb_offsets = NULL;
3749	kfree(sbi->s_mb_maxs);
3750	sbi->s_mb_maxs = NULL;
3751	return ret;
3752}
3753
3754/* need to called with the ext4 group lock held */
3755static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3756{
3757	struct ext4_prealloc_space *pa;
3758	struct list_head *cur, *tmp;
3759	int count = 0;
3760
3761	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3762		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3763		list_del(&pa->pa_group_list);
3764		count++;
3765		kmem_cache_free(ext4_pspace_cachep, pa);
3766	}
3767	return count;
3768}
3769
3770void ext4_mb_release(struct super_block *sb)
3771{
3772	ext4_group_t ngroups = ext4_get_groups_count(sb);
3773	ext4_group_t i;
3774	int num_meta_group_infos;
3775	struct ext4_group_info *grinfo, ***group_info;
3776	struct ext4_sb_info *sbi = EXT4_SB(sb);
3777	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3778	int count;
3779
3780	if (test_opt(sb, DISCARD)) {
3781		/*
3782		 * wait the discard work to drain all of ext4_free_data
3783		 */
3784		flush_work(&sbi->s_discard_work);
3785		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3786	}
3787
3788	if (sbi->s_group_info) {
3789		for (i = 0; i < ngroups; i++) {
3790			cond_resched();
3791			grinfo = ext4_get_group_info(sb, i);
3792			if (!grinfo)
3793				continue;
3794			mb_group_bb_bitmap_free(grinfo);
3795			ext4_lock_group(sb, i);
3796			count = ext4_mb_cleanup_pa(grinfo);
3797			if (count)
3798				mb_debug(sb, "mballoc: %d PAs left\n",
3799					 count);
3800			ext4_unlock_group(sb, i);
3801			kmem_cache_free(cachep, grinfo);
3802		}
3803		num_meta_group_infos = (ngroups +
3804				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3805			EXT4_DESC_PER_BLOCK_BITS(sb);
3806		rcu_read_lock();
3807		group_info = rcu_dereference(sbi->s_group_info);
3808		for (i = 0; i < num_meta_group_infos; i++)
3809			kfree(group_info[i]);
3810		kvfree(group_info);
3811		rcu_read_unlock();
3812	}
3813	kfree(sbi->s_mb_avg_fragment_size);
3814	kfree(sbi->s_mb_avg_fragment_size_locks);
3815	kfree(sbi->s_mb_largest_free_orders);
3816	kfree(sbi->s_mb_largest_free_orders_locks);
3817	kfree(sbi->s_mb_offsets);
3818	kfree(sbi->s_mb_maxs);
3819	iput(sbi->s_buddy_cache);
3820	if (sbi->s_mb_stats) {
3821		ext4_msg(sb, KERN_INFO,
3822		       "mballoc: %u blocks %u reqs (%u success)",
3823				atomic_read(&sbi->s_bal_allocated),
3824				atomic_read(&sbi->s_bal_reqs),
3825				atomic_read(&sbi->s_bal_success));
3826		ext4_msg(sb, KERN_INFO,
3827		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3828				"%u 2^N hits, %u breaks, %u lost",
3829				atomic_read(&sbi->s_bal_ex_scanned),
3830				atomic_read(&sbi->s_bal_groups_scanned),
3831				atomic_read(&sbi->s_bal_goals),
3832				atomic_read(&sbi->s_bal_2orders),
3833				atomic_read(&sbi->s_bal_breaks),
3834				atomic_read(&sbi->s_mb_lost_chunks));
3835		ext4_msg(sb, KERN_INFO,
3836		       "mballoc: %u generated and it took %llu",
3837				atomic_read(&sbi->s_mb_buddies_generated),
3838				atomic64_read(&sbi->s_mb_generation_time));
3839		ext4_msg(sb, KERN_INFO,
3840		       "mballoc: %u preallocated, %u discarded",
3841				atomic_read(&sbi->s_mb_preallocated),
3842				atomic_read(&sbi->s_mb_discarded));
3843	}
3844
3845	free_percpu(sbi->s_locality_groups);
3846}
3847
3848static inline int ext4_issue_discard(struct super_block *sb,
3849		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
3850{
3851	ext4_fsblk_t discard_block;
3852
3853	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3854			 ext4_group_first_block_no(sb, block_group));
3855	count = EXT4_C2B(EXT4_SB(sb), count);
3856	trace_ext4_discard_blocks(sb,
3857			(unsigned long long) discard_block, count);
3858
3859	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3860}
3861
3862static void ext4_free_data_in_buddy(struct super_block *sb,
3863				    struct ext4_free_data *entry)
3864{
3865	struct ext4_buddy e4b;
3866	struct ext4_group_info *db;
3867	int err, count = 0;
3868
3869	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3870		 entry->efd_count, entry->efd_group, entry);
3871
3872	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3873	/* we expect to find existing buddy because it's pinned */
3874	BUG_ON(err != 0);
3875
3876	spin_lock(&EXT4_SB(sb)->s_md_lock);
3877	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3878	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3879
3880	db = e4b.bd_info;
3881	/* there are blocks to put in buddy to make them really free */
3882	count += entry->efd_count;
3883	ext4_lock_group(sb, entry->efd_group);
3884	/* Take it out of per group rb tree */
3885	rb_erase(&entry->efd_node, &(db->bb_free_root));
3886	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3887
3888	/*
3889	 * Clear the trimmed flag for the group so that the next
3890	 * ext4_trim_fs can trim it.
 
 
3891	 */
3892	EXT4_MB_GRP_CLEAR_TRIMMED(db);
 
3893
3894	if (!db->bb_free_root.rb_node) {
3895		/* No more items in the per group rb tree
3896		 * balance refcounts from ext4_mb_free_metadata()
3897		 */
3898		folio_put(e4b.bd_buddy_folio);
3899		folio_put(e4b.bd_bitmap_folio);
3900	}
3901	ext4_unlock_group(sb, entry->efd_group);
3902	ext4_mb_unload_buddy(&e4b);
3903
3904	mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3905}
3906
3907/*
3908 * This function is called by the jbd2 layer once the commit has finished,
3909 * so we know we can free the blocks that were released with that commit.
3910 */
3911void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3912{
3913	struct ext4_sb_info *sbi = EXT4_SB(sb);
3914	struct ext4_free_data *entry, *tmp;
3915	LIST_HEAD(freed_data_list);
3916	struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
3917	bool wake;
3918
3919	list_replace_init(s_freed_head, &freed_data_list);
3920
3921	list_for_each_entry(entry, &freed_data_list, efd_list)
3922		ext4_free_data_in_buddy(sb, entry);
3923
3924	if (test_opt(sb, DISCARD)) {
3925		spin_lock(&sbi->s_md_lock);
3926		wake = list_empty(&sbi->s_discard_list);
3927		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3928		spin_unlock(&sbi->s_md_lock);
3929		if (wake)
3930			queue_work(system_unbound_wq, &sbi->s_discard_work);
3931	} else {
3932		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3933			kmem_cache_free(ext4_free_data_cachep, entry);
3934	}
3935}
3936
3937int __init ext4_init_mballoc(void)
3938{
3939	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3940					SLAB_RECLAIM_ACCOUNT);
3941	if (ext4_pspace_cachep == NULL)
3942		goto out;
3943
3944	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3945				    SLAB_RECLAIM_ACCOUNT);
3946	if (ext4_ac_cachep == NULL)
3947		goto out_pa_free;
3948
3949	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3950					   SLAB_RECLAIM_ACCOUNT);
3951	if (ext4_free_data_cachep == NULL)
3952		goto out_ac_free;
3953
3954	return 0;
3955
3956out_ac_free:
3957	kmem_cache_destroy(ext4_ac_cachep);
3958out_pa_free:
3959	kmem_cache_destroy(ext4_pspace_cachep);
3960out:
3961	return -ENOMEM;
3962}
3963
3964void ext4_exit_mballoc(void)
3965{
3966	/*
3967	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3968	 * before destroying the slab cache.
3969	 */
3970	rcu_barrier();
3971	kmem_cache_destroy(ext4_pspace_cachep);
3972	kmem_cache_destroy(ext4_ac_cachep);
3973	kmem_cache_destroy(ext4_free_data_cachep);
3974	ext4_groupinfo_destroy_slabs();
3975}
3976
3977#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
3978#define EXT4_MB_SYNC_UPDATE 0x0002
3979static int
3980ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
3981		     ext4_group_t group, ext4_grpblk_t blkoff,
3982		     ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
3983{
3984	struct ext4_sb_info *sbi = EXT4_SB(sb);
3985	struct buffer_head *bitmap_bh = NULL;
3986	struct ext4_group_desc *gdp;
3987	struct buffer_head *gdp_bh;
3988	int err;
3989	unsigned int i, already, changed = len;
3990
3991	KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
3992				   handle, sb, state, group, blkoff, len,
3993				   flags, ret_changed);
3994
3995	if (ret_changed)
3996		*ret_changed = 0;
3997	bitmap_bh = ext4_read_block_bitmap(sb, group);
3998	if (IS_ERR(bitmap_bh))
3999		return PTR_ERR(bitmap_bh);
4000
4001	if (handle) {
4002		BUFFER_TRACE(bitmap_bh, "getting write access");
4003		err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
4004						    EXT4_JTR_NONE);
4005		if (err)
4006			goto out_err;
4007	}
4008
4009	err = -EIO;
4010	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
4011	if (!gdp)
4012		goto out_err;
4013
4014	if (handle) {
4015		BUFFER_TRACE(gdp_bh, "get_write_access");
4016		err = ext4_journal_get_write_access(handle, sb, gdp_bh,
4017						    EXT4_JTR_NONE);
4018		if (err)
4019			goto out_err;
4020	}
4021
4022	ext4_lock_group(sb, group);
4023	if (ext4_has_group_desc_csum(sb) &&
4024	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4025		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4026		ext4_free_group_clusters_set(sb, gdp,
4027			ext4_free_clusters_after_init(sb, group, gdp));
4028	}
4029
4030	if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
4031		already = 0;
4032		for (i = 0; i < len; i++)
4033			if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4034					state)
4035				already++;
4036		changed = len - already;
4037	}
4038
4039	if (state) {
4040		mb_set_bits(bitmap_bh->b_data, blkoff, len);
4041		ext4_free_group_clusters_set(sb, gdp,
4042			ext4_free_group_clusters(sb, gdp) - changed);
4043	} else {
4044		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
4045		ext4_free_group_clusters_set(sb, gdp,
4046			ext4_free_group_clusters(sb, gdp) + changed);
4047	}
4048
4049	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4050	ext4_group_desc_csum_set(sb, group, gdp);
4051	ext4_unlock_group(sb, group);
4052	if (ret_changed)
4053		*ret_changed = changed;
4054
4055	if (sbi->s_log_groups_per_flex) {
4056		ext4_group_t flex_group = ext4_flex_group(sbi, group);
4057		struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4058					   s_flex_groups, flex_group);
4059
4060		if (state)
4061			atomic64_sub(changed, &fg->free_clusters);
4062		else
4063			atomic64_add(changed, &fg->free_clusters);
4064	}
4065
4066	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4067	if (err)
4068		goto out_err;
4069	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4070	if (err)
4071		goto out_err;
4072
4073	if (flags & EXT4_MB_SYNC_UPDATE) {
4074		sync_dirty_buffer(bitmap_bh);
4075		sync_dirty_buffer(gdp_bh);
4076	}
4077
4078out_err:
4079	brelse(bitmap_bh);
4080	return err;
4081}
4082
4083/*
4084 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
4085 * Returns 0 if success or error code
4086 */
4087static noinline_for_stack int
4088ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4089				handle_t *handle, unsigned int reserv_clstrs)
4090{
4091	struct ext4_group_desc *gdp;
4092	struct ext4_sb_info *sbi;
4093	struct super_block *sb;
4094	ext4_fsblk_t block;
4095	int err, len;
4096	int flags = 0;
4097	ext4_grpblk_t changed;
4098
4099	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4100	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4101
4102	sb = ac->ac_sb;
4103	sbi = EXT4_SB(sb);
4104
4105	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
4106	if (!gdp)
4107		return -EIO;
4108	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4109			ext4_free_group_clusters(sb, gdp));
4110
4111	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4112	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4113	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4114		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4115			   "fs metadata", block, block+len);
4116		/* File system mounted not to panic on error
4117		 * Fix the bitmap and return EFSCORRUPTED
4118		 * We leak some of the blocks here.
4119		 */
4120		err = ext4_mb_mark_context(handle, sb, true,
4121					   ac->ac_b_ex.fe_group,
4122					   ac->ac_b_ex.fe_start,
4123					   ac->ac_b_ex.fe_len,
4124					   0, NULL);
4125		if (!err)
4126			err = -EFSCORRUPTED;
4127		return err;
4128	}
4129
4130#ifdef AGGRESSIVE_CHECK
4131	flags |= EXT4_MB_BITMAP_MARKED_CHECK;
4132#endif
4133	err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
4134				   ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
4135				   flags, &changed);
4136
4137	if (err && changed == 0)
4138		return err;
4139
4140#ifdef AGGRESSIVE_CHECK
4141	BUG_ON(changed != ac->ac_b_ex.fe_len);
4142#endif
4143	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4144	/*
4145	 * Now reduce the dirty block count also. Should not go negative
4146	 */
4147	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4148		/* release all the reserved blocks if non delalloc */
4149		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4150				   reserv_clstrs);
4151
4152	return err;
4153}
4154
4155/*
4156 * Idempotent helper for Ext4 fast commit replay path to set the state of
4157 * blocks in bitmaps and update counters.
4158 */
4159void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4160		     int len, bool state)
4161{
4162	struct ext4_sb_info *sbi = EXT4_SB(sb);
4163	ext4_group_t group;
4164	ext4_grpblk_t blkoff;
4165	int err = 0;
4166	unsigned int clen, thisgrp_len;
4167
4168	while (len > 0) {
4169		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4170
4171		/*
4172		 * Check to see if we are freeing blocks across a group
4173		 * boundary.
4174		 * In case of flex_bg, this can happen that (block, len) may
4175		 * span across more than one group. In that case we need to
4176		 * get the corresponding group metadata to work with.
4177		 * For this we have goto again loop.
4178		 */
4179		thisgrp_len = min_t(unsigned int, (unsigned int)len,
4180			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4181		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4182
4183		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4184			ext4_error(sb, "Marking blocks in system zone - "
4185				   "Block = %llu, len = %u",
4186				   block, thisgrp_len);
4187			break;
4188		}
4189
4190		err = ext4_mb_mark_context(NULL, sb, state,
4191					   group, blkoff, clen,
4192					   EXT4_MB_BITMAP_MARKED_CHECK |
4193					   EXT4_MB_SYNC_UPDATE,
4194					   NULL);
4195		if (err)
4196			break;
4197
4198		block += thisgrp_len;
4199		len -= thisgrp_len;
4200		BUG_ON(len < 0);
4201	}
4202}
4203
4204/*
4205 * here we normalize request for locality group
4206 * Group request are normalized to s_mb_group_prealloc, which goes to
4207 * s_strip if we set the same via mount option.
4208 * s_mb_group_prealloc can be configured via
4209 * /sys/fs/ext4/<partition>/mb_group_prealloc
4210 *
4211 * XXX: should we try to preallocate more than the group has now?
4212 */
4213static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4214{
4215	struct super_block *sb = ac->ac_sb;
4216	struct ext4_locality_group *lg = ac->ac_lg;
4217
4218	BUG_ON(lg == NULL);
4219	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4220	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4221}
4222
4223/*
4224 * This function returns the next element to look at during inode
4225 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
4226 * (ei->i_prealloc_lock)
4227 *
4228 * new_start	The start of the range we want to compare
4229 * cur_start	The existing start that we are comparing against
4230 * node	The node of the rb_tree
4231 */
4232static inline struct rb_node*
4233ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4234{
4235	if (new_start < cur_start)
4236		return node->rb_left;
4237	else
4238		return node->rb_right;
4239}
4240
4241static inline void
4242ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4243			  ext4_lblk_t start, loff_t end)
4244{
4245	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4246	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4247	struct ext4_prealloc_space *tmp_pa;
4248	ext4_lblk_t tmp_pa_start;
4249	loff_t tmp_pa_end;
4250	struct rb_node *iter;
4251
4252	read_lock(&ei->i_prealloc_lock);
4253	for (iter = ei->i_prealloc_node.rb_node; iter;
4254	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4255		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4256				  pa_node.inode_node);
4257		tmp_pa_start = tmp_pa->pa_lstart;
4258		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4259
4260		spin_lock(&tmp_pa->pa_lock);
4261		if (tmp_pa->pa_deleted == 0)
4262			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4263		spin_unlock(&tmp_pa->pa_lock);
4264	}
4265	read_unlock(&ei->i_prealloc_lock);
4266}
4267
4268/*
4269 * Given an allocation context "ac" and a range "start", "end", check
4270 * and adjust boundaries if the range overlaps with any of the existing
4271 * preallocatoins stored in the corresponding inode of the allocation context.
4272 *
4273 * Parameters:
4274 *	ac			allocation context
4275 *	start			start of the new range
4276 *	end			end of the new range
4277 */
4278static inline void
4279ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4280			  ext4_lblk_t *start, loff_t *end)
4281{
4282	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4283	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4284	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4285	struct rb_node *iter;
4286	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4287	loff_t new_end, tmp_pa_end, left_pa_end = -1;
4288
4289	new_start = *start;
4290	new_end = *end;
4291
4292	/*
4293	 * Adjust the normalized range so that it doesn't overlap with any
4294	 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
4295	 * so it doesn't change underneath us.
4296	 */
4297	read_lock(&ei->i_prealloc_lock);
4298
4299	/* Step 1: find any one immediate neighboring PA of the normalized range */
4300	for (iter = ei->i_prealloc_node.rb_node; iter;
4301	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4302					    tmp_pa_start, iter)) {
4303		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4304				  pa_node.inode_node);
4305		tmp_pa_start = tmp_pa->pa_lstart;
4306		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4307
4308		/* PA must not overlap original request */
4309		spin_lock(&tmp_pa->pa_lock);
4310		if (tmp_pa->pa_deleted == 0)
4311			BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4312				 ac->ac_o_ex.fe_logical < tmp_pa_start));
4313		spin_unlock(&tmp_pa->pa_lock);
4314	}
4315
4316	/*
4317	 * Step 2: check if the found PA is left or right neighbor and
4318	 * get the other neighbor
4319	 */
4320	if (tmp_pa) {
4321		if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4322			struct rb_node *tmp;
4323
4324			left_pa = tmp_pa;
4325			tmp = rb_next(&left_pa->pa_node.inode_node);
4326			if (tmp) {
4327				right_pa = rb_entry(tmp,
4328						    struct ext4_prealloc_space,
4329						    pa_node.inode_node);
4330			}
4331		} else {
4332			struct rb_node *tmp;
4333
4334			right_pa = tmp_pa;
4335			tmp = rb_prev(&right_pa->pa_node.inode_node);
4336			if (tmp) {
4337				left_pa = rb_entry(tmp,
4338						   struct ext4_prealloc_space,
4339						   pa_node.inode_node);
4340			}
4341		}
4342	}
4343
4344	/* Step 3: get the non deleted neighbors */
4345	if (left_pa) {
4346		for (iter = &left_pa->pa_node.inode_node;;
4347		     iter = rb_prev(iter)) {
4348			if (!iter) {
4349				left_pa = NULL;
4350				break;
4351			}
4352
4353			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4354					  pa_node.inode_node);
4355			left_pa = tmp_pa;
4356			spin_lock(&tmp_pa->pa_lock);
4357			if (tmp_pa->pa_deleted == 0) {
4358				spin_unlock(&tmp_pa->pa_lock);
4359				break;
4360			}
4361			spin_unlock(&tmp_pa->pa_lock);
4362		}
4363	}
4364
4365	if (right_pa) {
4366		for (iter = &right_pa->pa_node.inode_node;;
4367		     iter = rb_next(iter)) {
4368			if (!iter) {
4369				right_pa = NULL;
4370				break;
4371			}
4372
4373			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4374					  pa_node.inode_node);
4375			right_pa = tmp_pa;
4376			spin_lock(&tmp_pa->pa_lock);
4377			if (tmp_pa->pa_deleted == 0) {
4378				spin_unlock(&tmp_pa->pa_lock);
4379				break;
4380			}
4381			spin_unlock(&tmp_pa->pa_lock);
4382		}
4383	}
4384
4385	if (left_pa) {
4386		left_pa_end = pa_logical_end(sbi, left_pa);
4387		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4388	}
4389
4390	if (right_pa) {
4391		right_pa_start = right_pa->pa_lstart;
4392		BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4393	}
4394
4395	/* Step 4: trim our normalized range to not overlap with the neighbors */
4396	if (left_pa) {
4397		if (left_pa_end > new_start)
4398			new_start = left_pa_end;
4399	}
4400
4401	if (right_pa) {
4402		if (right_pa_start < new_end)
4403			new_end = right_pa_start;
4404	}
4405	read_unlock(&ei->i_prealloc_lock);
4406
4407	/* XXX: extra loop to check we really don't overlap preallocations */
4408	ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4409
4410	*start = new_start;
4411	*end = new_end;
4412}
4413
4414/*
4415 * Normalization means making request better in terms of
4416 * size and alignment
4417 */
4418static noinline_for_stack void
4419ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4420				struct ext4_allocation_request *ar)
4421{
4422	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4423	struct ext4_super_block *es = sbi->s_es;
4424	int bsbits, max;
4425	loff_t size, start_off, end;
4426	loff_t orig_size __maybe_unused;
4427	ext4_lblk_t start;
4428
4429	/* do normalize only data requests, metadata requests
4430	   do not need preallocation */
4431	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4432		return;
4433
4434	/* sometime caller may want exact blocks */
4435	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4436		return;
4437
4438	/* caller may indicate that preallocation isn't
4439	 * required (it's a tail, for example) */
4440	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4441		return;
4442
4443	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4444		ext4_mb_normalize_group_request(ac);
4445		return ;
4446	}
4447
4448	bsbits = ac->ac_sb->s_blocksize_bits;
4449
4450	/* first, let's learn actual file size
4451	 * given current request is allocated */
4452	size = extent_logical_end(sbi, &ac->ac_o_ex);
4453	size = size << bsbits;
4454	if (size < i_size_read(ac->ac_inode))
4455		size = i_size_read(ac->ac_inode);
4456	orig_size = size;
4457
4458	/* max size of free chunks */
4459	max = 2 << bsbits;
4460
4461#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
4462		(req <= (size) || max <= (chunk_size))
4463
4464	/* first, try to predict filesize */
4465	/* XXX: should this table be tunable? */
4466	start_off = 0;
4467	if (size <= 16 * 1024) {
4468		size = 16 * 1024;
4469	} else if (size <= 32 * 1024) {
4470		size = 32 * 1024;
4471	} else if (size <= 64 * 1024) {
4472		size = 64 * 1024;
4473	} else if (size <= 128 * 1024) {
4474		size = 128 * 1024;
4475	} else if (size <= 256 * 1024) {
4476		size = 256 * 1024;
4477	} else if (size <= 512 * 1024) {
4478		size = 512 * 1024;
4479	} else if (size <= 1024 * 1024) {
4480		size = 1024 * 1024;
4481	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4482		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4483						(21 - bsbits)) << 21;
4484		size = 2 * 1024 * 1024;
4485	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4486		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4487							(22 - bsbits)) << 22;
4488		size = 4 * 1024 * 1024;
4489	} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4490					(8<<20)>>bsbits, max, 8 * 1024)) {
4491		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4492							(23 - bsbits)) << 23;
4493		size = 8 * 1024 * 1024;
4494	} else {
4495		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4496		size	  = (loff_t) EXT4_C2B(sbi,
4497					      ac->ac_o_ex.fe_len) << bsbits;
4498	}
4499	size = size >> bsbits;
4500	start = start_off >> bsbits;
4501
4502	/*
4503	 * For tiny groups (smaller than 8MB) the chosen allocation
4504	 * alignment may be larger than group size. Make sure the
4505	 * alignment does not move allocation to a different group which
4506	 * makes mballoc fail assertions later.
4507	 */
4508	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4509			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4510
4511	/* avoid unnecessary preallocation that may trigger assertions */
4512	if (start + size > EXT_MAX_BLOCKS)
4513		size = EXT_MAX_BLOCKS - start;
4514
4515	/* don't cover already allocated blocks in selected range */
4516	if (ar->pleft && start <= ar->lleft) {
4517		size -= ar->lleft + 1 - start;
4518		start = ar->lleft + 1;
4519	}
4520	if (ar->pright && start + size - 1 >= ar->lright)
4521		size -= start + size - ar->lright;
4522
4523	/*
4524	 * Trim allocation request for filesystems with artificially small
4525	 * groups.
4526	 */
4527	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4528		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4529
4530	end = start + size;
4531
4532	ext4_mb_pa_adjust_overlap(ac, &start, &end);
4533
4534	size = end - start;
4535
4536	/*
4537	 * In this function "start" and "size" are normalized for better
4538	 * alignment and length such that we could preallocate more blocks.
4539	 * This normalization is done such that original request of
4540	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4541	 * "size" boundaries.
4542	 * (Note fe_len can be relaxed since FS block allocation API does not
4543	 * provide gurantee on number of contiguous blocks allocation since that
4544	 * depends upon free space left, etc).
4545	 * In case of inode pa, later we use the allocated blocks
4546	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4547	 * range of goal/best blocks [start, size] to put it at the
4548	 * ac_o_ex.fe_logical extent of this inode.
4549	 * (See ext4_mb_use_inode_pa() for more details)
4550	 */
4551	if (start + size <= ac->ac_o_ex.fe_logical ||
4552			start > ac->ac_o_ex.fe_logical) {
4553		ext4_msg(ac->ac_sb, KERN_ERR,
4554			 "start %lu, size %lu, fe_logical %lu",
4555			 (unsigned long) start, (unsigned long) size,
4556			 (unsigned long) ac->ac_o_ex.fe_logical);
4557		BUG();
4558	}
4559	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4560
4561	/* now prepare goal request */
4562
4563	/* XXX: is it better to align blocks WRT to logical
4564	 * placement or satisfy big request as is */
4565	ac->ac_g_ex.fe_logical = start;
4566	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4567	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4568
4569	/* define goal start in order to merge */
4570	if (ar->pright && (ar->lright == (start + size)) &&
4571	    ar->pright >= size &&
4572	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4573		/* merge to the right */
4574		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4575						&ac->ac_g_ex.fe_group,
4576						&ac->ac_g_ex.fe_start);
4577		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4578	}
4579	if (ar->pleft && (ar->lleft + 1 == start) &&
4580	    ar->pleft + 1 < ext4_blocks_count(es)) {
4581		/* merge to the left */
4582		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4583						&ac->ac_g_ex.fe_group,
4584						&ac->ac_g_ex.fe_start);
4585		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4586	}
4587
4588	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4589		 orig_size, start);
4590}
4591
4592static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4593{
4594	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4595
4596	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4597		atomic_inc(&sbi->s_bal_reqs);
4598		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4599		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4600			atomic_inc(&sbi->s_bal_success);
4601
4602		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4603		for (int i=0; i<EXT4_MB_NUM_CRS; i++) {
4604			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4605		}
4606
4607		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4608		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4609				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4610			atomic_inc(&sbi->s_bal_goals);
4611		/* did we allocate as much as normalizer originally wanted? */
4612		if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4613			atomic_inc(&sbi->s_bal_len_goals);
4614
4615		if (ac->ac_found > sbi->s_mb_max_to_scan)
4616			atomic_inc(&sbi->s_bal_breaks);
4617	}
4618
4619	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4620		trace_ext4_mballoc_alloc(ac);
4621	else
4622		trace_ext4_mballoc_prealloc(ac);
4623}
4624
4625/*
4626 * Called on failure; free up any blocks from the inode PA for this
4627 * context.  We don't need this for MB_GROUP_PA because we only change
4628 * pa_free in ext4_mb_release_context(), but on failure, we've already
4629 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4630 */
4631static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4632{
4633	struct ext4_prealloc_space *pa = ac->ac_pa;
4634	struct ext4_buddy e4b;
4635	int err;
4636
4637	if (pa == NULL) {
4638		if (ac->ac_f_ex.fe_len == 0)
4639			return;
4640		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4641		if (WARN_RATELIMIT(err,
4642				   "ext4: mb_load_buddy failed (%d)", err))
4643			/*
4644			 * This should never happen since we pin the
4645			 * pages in the ext4_allocation_context so
4646			 * ext4_mb_load_buddy() should never fail.
4647			 */
4648			return;
4649		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4650		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4651			       ac->ac_f_ex.fe_len);
4652		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4653		ext4_mb_unload_buddy(&e4b);
4654		return;
4655	}
4656	if (pa->pa_type == MB_INODE_PA) {
4657		spin_lock(&pa->pa_lock);
4658		pa->pa_free += ac->ac_b_ex.fe_len;
4659		spin_unlock(&pa->pa_lock);
4660	}
4661}
4662
4663/*
4664 * use blocks preallocated to inode
4665 */
4666static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4667				struct ext4_prealloc_space *pa)
4668{
4669	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4670	ext4_fsblk_t start;
4671	ext4_fsblk_t end;
4672	int len;
4673
4674	/* found preallocated blocks, use them */
4675	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4676	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4677		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4678	len = EXT4_NUM_B2C(sbi, end - start);
4679	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4680					&ac->ac_b_ex.fe_start);
4681	ac->ac_b_ex.fe_len = len;
4682	ac->ac_status = AC_STATUS_FOUND;
4683	ac->ac_pa = pa;
4684
4685	BUG_ON(start < pa->pa_pstart);
4686	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4687	BUG_ON(pa->pa_free < len);
4688	BUG_ON(ac->ac_b_ex.fe_len <= 0);
4689	pa->pa_free -= len;
4690
4691	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4692}
4693
4694/*
4695 * use blocks preallocated to locality group
4696 */
4697static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4698				struct ext4_prealloc_space *pa)
4699{
4700	unsigned int len = ac->ac_o_ex.fe_len;
4701
4702	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4703					&ac->ac_b_ex.fe_group,
4704					&ac->ac_b_ex.fe_start);
4705	ac->ac_b_ex.fe_len = len;
4706	ac->ac_status = AC_STATUS_FOUND;
4707	ac->ac_pa = pa;
4708
4709	/* we don't correct pa_pstart or pa_len here to avoid
4710	 * possible race when the group is being loaded concurrently
4711	 * instead we correct pa later, after blocks are marked
4712	 * in on-disk bitmap -- see ext4_mb_release_context()
4713	 * Other CPUs are prevented from allocating from this pa by lg_mutex
4714	 */
4715	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4716		 pa->pa_lstart, len, pa);
4717}
4718
4719/*
4720 * Return the prealloc space that have minimal distance
4721 * from the goal block. @cpa is the prealloc
4722 * space that is having currently known minimal distance
4723 * from the goal block.
4724 */
4725static struct ext4_prealloc_space *
4726ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4727			struct ext4_prealloc_space *pa,
4728			struct ext4_prealloc_space *cpa)
4729{
4730	ext4_fsblk_t cur_distance, new_distance;
4731
4732	if (cpa == NULL) {
4733		atomic_inc(&pa->pa_count);
4734		return pa;
4735	}
4736	cur_distance = abs(goal_block - cpa->pa_pstart);
4737	new_distance = abs(goal_block - pa->pa_pstart);
4738
4739	if (cur_distance <= new_distance)
4740		return cpa;
4741
4742	/* drop the previous reference */
4743	atomic_dec(&cpa->pa_count);
4744	atomic_inc(&pa->pa_count);
4745	return pa;
4746}
4747
4748/*
4749 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4750 */
4751static bool
4752ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4753		      struct ext4_prealloc_space *pa)
4754{
4755	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4756	ext4_fsblk_t start;
4757
4758	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4759		return true;
4760
4761	/*
4762	 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4763	 * in ext4_mb_normalize_request and will keep same with ac_o_ex
4764	 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4765	 * consistent with ext4_mb_find_by_goal.
4766	 */
4767	start = pa->pa_pstart +
4768		(ac->ac_g_ex.fe_logical - pa->pa_lstart);
4769	if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4770		return false;
4771
4772	if (ac->ac_g_ex.fe_len > pa->pa_len -
4773	    EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4774		return false;
4775
4776	return true;
4777}
4778
4779/*
4780 * search goal blocks in preallocated space
4781 */
4782static noinline_for_stack bool
4783ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4784{
4785	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4786	int order, i;
4787	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4788	struct ext4_locality_group *lg;
4789	struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4790	struct rb_node *iter;
4791	ext4_fsblk_t goal_block;
4792
4793	/* only data can be preallocated */
4794	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4795		return false;
4796
4797	/*
4798	 * first, try per-file preallocation by searching the inode pa rbtree.
4799	 *
4800	 * Here, we can't do a direct traversal of the tree because
4801	 * ext4_mb_discard_group_preallocation() can paralelly mark the pa
4802	 * deleted and that can cause direct traversal to skip some entries.
4803	 */
4804	read_lock(&ei->i_prealloc_lock);
4805
4806	if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4807		goto try_group_pa;
4808	}
4809
4810	/*
4811	 * Step 1: Find a pa with logical start immediately adjacent to the
4812	 * original logical start. This could be on the left or right.
4813	 *
4814	 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4815	 */
4816	for (iter = ei->i_prealloc_node.rb_node; iter;
4817	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4818					    tmp_pa->pa_lstart, iter)) {
4819		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4820				  pa_node.inode_node);
4821	}
4822
4823	/*
4824	 * Step 2: The adjacent pa might be to the right of logical start, find
4825	 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4826	 * logical start is towards the left of original request's logical start
4827	 */
4828	if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4829		struct rb_node *tmp;
4830		tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4831
4832		if (tmp) {
4833			tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4834					    pa_node.inode_node);
4835		} else {
4836			/*
4837			 * If there is no adjacent pa to the left then finding
4838			 * an overlapping pa is not possible hence stop searching
4839			 * inode pa tree
4840			 */
4841			goto try_group_pa;
4842		}
4843	}
4844
4845	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4846
4847	/*
4848	 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4849	 * the first non deleted adjacent pa. After this step we should have a
4850	 * valid tmp_pa which is guaranteed to be non deleted.
4851	 */
4852	for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4853		if (!iter) {
4854			/*
4855			 * no non deleted left adjacent pa, so stop searching
4856			 * inode pa tree
4857			 */
4858			goto try_group_pa;
4859		}
4860		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4861				  pa_node.inode_node);
4862		spin_lock(&tmp_pa->pa_lock);
4863		if (tmp_pa->pa_deleted == 0) {
4864			/*
4865			 * We will keep holding the pa_lock from
4866			 * this point on because we don't want group discard
4867			 * to delete this pa underneath us. Since group
4868			 * discard is anyways an ENOSPC operation it
4869			 * should be okay for it to wait a few more cycles.
4870			 */
4871			break;
4872		} else {
4873			spin_unlock(&tmp_pa->pa_lock);
4874		}
4875	}
4876
4877	BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4878	BUG_ON(tmp_pa->pa_deleted == 1);
4879
4880	/*
4881	 * Step 4: We now have the non deleted left adjacent pa. Only this
4882	 * pa can possibly satisfy the request hence check if it overlaps
4883	 * original logical start and stop searching if it doesn't.
4884	 */
4885	if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4886		spin_unlock(&tmp_pa->pa_lock);
4887		goto try_group_pa;
4888	}
4889
4890	/* non-extent files can't have physical blocks past 2^32 */
4891	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4892	    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4893	     EXT4_MAX_BLOCK_FILE_PHYS)) {
4894		/*
4895		 * Since PAs don't overlap, we won't find any other PA to
4896		 * satisfy this.
4897		 */
4898		spin_unlock(&tmp_pa->pa_lock);
4899		goto try_group_pa;
4900	}
4901
4902	if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4903		atomic_inc(&tmp_pa->pa_count);
4904		ext4_mb_use_inode_pa(ac, tmp_pa);
4905		spin_unlock(&tmp_pa->pa_lock);
4906		read_unlock(&ei->i_prealloc_lock);
4907		return true;
4908	} else {
4909		/*
4910		 * We found a valid overlapping pa but couldn't use it because
4911		 * it had no free blocks. This should ideally never happen
4912		 * because:
4913		 *
4914		 * 1. When a new inode pa is added to rbtree it must have
4915		 *    pa_free > 0 since otherwise we won't actually need
4916		 *    preallocation.
4917		 *
4918		 * 2. An inode pa that is in the rbtree can only have it's
4919		 *    pa_free become zero when another thread calls:
4920		 *      ext4_mb_new_blocks
4921		 *       ext4_mb_use_preallocated
4922		 *        ext4_mb_use_inode_pa
4923		 *
4924		 * 3. Further, after the above calls make pa_free == 0, we will
4925		 *    immediately remove it from the rbtree in:
4926		 *      ext4_mb_new_blocks
4927		 *       ext4_mb_release_context
4928		 *        ext4_mb_put_pa
4929		 *
4930		 * 4. Since the pa_free becoming 0 and pa_free getting removed
4931		 * from tree both happen in ext4_mb_new_blocks, which is always
4932		 * called with i_data_sem held for data allocations, we can be
4933		 * sure that another process will never see a pa in rbtree with
4934		 * pa_free == 0.
4935		 */
4936		WARN_ON_ONCE(tmp_pa->pa_free == 0);
4937	}
4938	spin_unlock(&tmp_pa->pa_lock);
4939try_group_pa:
4940	read_unlock(&ei->i_prealloc_lock);
4941
4942	/* can we use group allocation? */
4943	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4944		return false;
4945
4946	/* inode may have no locality group for some reason */
4947	lg = ac->ac_lg;
4948	if (lg == NULL)
4949		return false;
4950	order  = fls(ac->ac_o_ex.fe_len) - 1;
4951	if (order > PREALLOC_TB_SIZE - 1)
4952		/* The max size of hash table is PREALLOC_TB_SIZE */
4953		order = PREALLOC_TB_SIZE - 1;
4954
4955	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4956	/*
4957	 * search for the prealloc space that is having
4958	 * minimal distance from the goal block.
4959	 */
4960	for (i = order; i < PREALLOC_TB_SIZE; i++) {
4961		rcu_read_lock();
4962		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4963					pa_node.lg_list) {
4964			spin_lock(&tmp_pa->pa_lock);
4965			if (tmp_pa->pa_deleted == 0 &&
4966					tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4967
4968				cpa = ext4_mb_check_group_pa(goal_block,
4969								tmp_pa, cpa);
4970			}
4971			spin_unlock(&tmp_pa->pa_lock);
4972		}
4973		rcu_read_unlock();
4974	}
4975	if (cpa) {
4976		ext4_mb_use_group_pa(ac, cpa);
4977		return true;
4978	}
4979	return false;
4980}
4981
4982/*
4983 * the function goes through all preallocation in this group and marks them
4984 * used in in-core bitmap. buddy must be generated from this bitmap
4985 * Need to be called with ext4 group lock held
4986 */
4987static noinline_for_stack
4988void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4989					ext4_group_t group)
4990{
4991	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4992	struct ext4_prealloc_space *pa;
4993	struct list_head *cur;
4994	ext4_group_t groupnr;
4995	ext4_grpblk_t start;
4996	int preallocated = 0;
4997	int len;
4998
4999	if (!grp)
5000		return;
5001
5002	/* all form of preallocation discards first load group,
5003	 * so the only competing code is preallocation use.
5004	 * we don't need any locking here
5005	 * notice we do NOT ignore preallocations with pa_deleted
5006	 * otherwise we could leave used blocks available for
5007	 * allocation in buddy when concurrent ext4_mb_put_pa()
5008	 * is dropping preallocation
5009	 */
5010	list_for_each(cur, &grp->bb_prealloc_list) {
5011		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5012		spin_lock(&pa->pa_lock);
5013		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5014					     &groupnr, &start);
5015		len = pa->pa_len;
5016		spin_unlock(&pa->pa_lock);
5017		if (unlikely(len == 0))
5018			continue;
5019		BUG_ON(groupnr != group);
5020		mb_set_bits(bitmap, start, len);
5021		preallocated += len;
5022	}
5023	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
5024}
5025
5026static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5027				    struct ext4_prealloc_space *pa)
5028{
5029	struct ext4_inode_info *ei;
5030
5031	if (pa->pa_deleted) {
5032		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5033			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5034			     pa->pa_len);
5035		return;
5036	}
5037
5038	pa->pa_deleted = 1;
5039
5040	if (pa->pa_type == MB_INODE_PA) {
5041		ei = EXT4_I(pa->pa_inode);
5042		atomic_dec(&ei->i_prealloc_active);
5043	}
5044}
5045
5046static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5047{
5048	BUG_ON(!pa);
5049	BUG_ON(atomic_read(&pa->pa_count));
5050	BUG_ON(pa->pa_deleted == 0);
5051	kmem_cache_free(ext4_pspace_cachep, pa);
5052}
5053
5054static void ext4_mb_pa_callback(struct rcu_head *head)
5055{
5056	struct ext4_prealloc_space *pa;
5057
5058	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5059	ext4_mb_pa_free(pa);
5060}
5061
5062/*
5063 * drops a reference to preallocated space descriptor
5064 * if this was the last reference and the space is consumed
5065 */
5066static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5067			struct super_block *sb, struct ext4_prealloc_space *pa)
5068{
5069	ext4_group_t grp;
5070	ext4_fsblk_t grp_blk;
5071	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5072
5073	/* in this short window concurrent discard can set pa_deleted */
5074	spin_lock(&pa->pa_lock);
5075	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5076		spin_unlock(&pa->pa_lock);
5077		return;
5078	}
5079
5080	if (pa->pa_deleted == 1) {
5081		spin_unlock(&pa->pa_lock);
5082		return;
5083	}
5084
5085	ext4_mb_mark_pa_deleted(sb, pa);
5086	spin_unlock(&pa->pa_lock);
5087
5088	grp_blk = pa->pa_pstart;
5089	/*
5090	 * If doing group-based preallocation, pa_pstart may be in the
5091	 * next group when pa is used up
5092	 */
5093	if (pa->pa_type == MB_GROUP_PA)
5094		grp_blk--;
5095
5096	grp = ext4_get_group_number(sb, grp_blk);
5097
5098	/*
5099	 * possible race:
5100	 *
5101	 *  P1 (buddy init)			P2 (regular allocation)
5102	 *					find block B in PA
5103	 *  copy on-disk bitmap to buddy
5104	 *  					mark B in on-disk bitmap
5105	 *					drop PA from group
5106	 *  mark all PAs in buddy
5107	 *
5108	 * thus, P1 initializes buddy with B available. to prevent this
5109	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5110	 * against that pair
5111	 */
5112	ext4_lock_group(sb, grp);
5113	list_del(&pa->pa_group_list);
5114	ext4_unlock_group(sb, grp);
5115
5116	if (pa->pa_type == MB_INODE_PA) {
5117		write_lock(pa->pa_node_lock.inode_lock);
5118		rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5119		write_unlock(pa->pa_node_lock.inode_lock);
5120		ext4_mb_pa_free(pa);
5121	} else {
5122		spin_lock(pa->pa_node_lock.lg_lock);
5123		list_del_rcu(&pa->pa_node.lg_list);
5124		spin_unlock(pa->pa_node_lock.lg_lock);
5125		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5126	}
5127}
5128
5129static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5130{
5131	struct rb_node **iter = &root->rb_node, *parent = NULL;
5132	struct ext4_prealloc_space *iter_pa, *new_pa;
5133	ext4_lblk_t iter_start, new_start;
5134
5135	while (*iter) {
5136		iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5137				   pa_node.inode_node);
5138		new_pa = rb_entry(new, struct ext4_prealloc_space,
5139				   pa_node.inode_node);
5140		iter_start = iter_pa->pa_lstart;
5141		new_start = new_pa->pa_lstart;
5142
5143		parent = *iter;
5144		if (new_start < iter_start)
5145			iter = &((*iter)->rb_left);
5146		else
5147			iter = &((*iter)->rb_right);
5148	}
5149
5150	rb_link_node(new, parent, iter);
5151	rb_insert_color(new, root);
5152}
5153
5154/*
5155 * creates new preallocated space for given inode
5156 */
5157static noinline_for_stack void
5158ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5159{
5160	struct super_block *sb = ac->ac_sb;
5161	struct ext4_sb_info *sbi = EXT4_SB(sb);
5162	struct ext4_prealloc_space *pa;
5163	struct ext4_group_info *grp;
5164	struct ext4_inode_info *ei;
5165
5166	/* preallocate only when found space is larger then requested */
5167	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5168	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5169	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5170	BUG_ON(ac->ac_pa == NULL);
5171
5172	pa = ac->ac_pa;
5173
5174	if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5175		struct ext4_free_extent ex = {
5176			.fe_logical = ac->ac_g_ex.fe_logical,
5177			.fe_len = ac->ac_orig_goal_len,
5178		};
5179		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5180		loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
5181
5182		/*
5183		 * We can't allocate as much as normalizer wants, so we try
5184		 * to get proper lstart to cover the original request, except
5185		 * when the goal doesn't cover the original request as below:
5186		 *
5187		 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5188		 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5189		 */
5190		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5191		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5192
5193		/*
5194		 * Use the below logic for adjusting best extent as it keeps
5195		 * fragmentation in check while ensuring logical range of best
5196		 * extent doesn't overflow out of goal extent:
5197		 *
5198		 * 1. Check if best ex can be kept at end of goal (before
5199		 *    cr_best_avail trimmed it) and still cover original start
5200		 * 2. Else, check if best ex can be kept at start of goal and
5201		 *    still cover original end
5202		 * 3. Else, keep the best ex at start of original request.
5203		 */
5204		ex.fe_len = ac->ac_b_ex.fe_len;
5205
5206		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5207		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5208			goto adjust_bex;
5209
5210		ex.fe_logical = ac->ac_g_ex.fe_logical;
5211		if (o_ex_end <= extent_logical_end(sbi, &ex))
5212			goto adjust_bex;
5213
5214		ex.fe_logical = ac->ac_o_ex.fe_logical;
5215adjust_bex:
5216		ac->ac_b_ex.fe_logical = ex.fe_logical;
5217
5218		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5219		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5220	}
5221
5222	pa->pa_lstart = ac->ac_b_ex.fe_logical;
5223	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5224	pa->pa_len = ac->ac_b_ex.fe_len;
5225	pa->pa_free = pa->pa_len;
5226	spin_lock_init(&pa->pa_lock);
5227	INIT_LIST_HEAD(&pa->pa_group_list);
5228	pa->pa_deleted = 0;
5229	pa->pa_type = MB_INODE_PA;
5230
5231	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5232		 pa->pa_len, pa->pa_lstart);
5233	trace_ext4_mb_new_inode_pa(ac, pa);
5234
5235	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5236	ext4_mb_use_inode_pa(ac, pa);
5237
5238	ei = EXT4_I(ac->ac_inode);
5239	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5240	if (!grp)
5241		return;
5242
5243	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5244	pa->pa_inode = ac->ac_inode;
5245
5246	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5247
5248	write_lock(pa->pa_node_lock.inode_lock);
5249	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5250	write_unlock(pa->pa_node_lock.inode_lock);
5251	atomic_inc(&ei->i_prealloc_active);
5252}
5253
5254/*
5255 * creates new preallocated space for locality group inodes belongs to
5256 */
5257static noinline_for_stack void
5258ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5259{
5260	struct super_block *sb = ac->ac_sb;
5261	struct ext4_locality_group *lg;
5262	struct ext4_prealloc_space *pa;
5263	struct ext4_group_info *grp;
5264
5265	/* preallocate only when found space is larger then requested */
5266	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5267	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5268	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5269	BUG_ON(ac->ac_pa == NULL);
5270
5271	pa = ac->ac_pa;
5272
5273	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5274	pa->pa_lstart = pa->pa_pstart;
5275	pa->pa_len = ac->ac_b_ex.fe_len;
5276	pa->pa_free = pa->pa_len;
5277	spin_lock_init(&pa->pa_lock);
5278	INIT_LIST_HEAD(&pa->pa_node.lg_list);
5279	INIT_LIST_HEAD(&pa->pa_group_list);
5280	pa->pa_deleted = 0;
5281	pa->pa_type = MB_GROUP_PA;
5282
5283	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5284		 pa->pa_len, pa->pa_lstart);
5285	trace_ext4_mb_new_group_pa(ac, pa);
5286
5287	ext4_mb_use_group_pa(ac, pa);
5288	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5289
5290	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5291	if (!grp)
5292		return;
5293	lg = ac->ac_lg;
5294	BUG_ON(lg == NULL);
5295
5296	pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5297	pa->pa_inode = NULL;
5298
5299	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5300
5301	/*
5302	 * We will later add the new pa to the right bucket
5303	 * after updating the pa_free in ext4_mb_release_context
5304	 */
5305}
5306
5307static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5308{
5309	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5310		ext4_mb_new_group_pa(ac);
5311	else
5312		ext4_mb_new_inode_pa(ac);
5313}
5314
5315/*
5316 * finds all unused blocks in on-disk bitmap, frees them in
5317 * in-core bitmap and buddy.
5318 * @pa must be unlinked from inode and group lists, so that
5319 * nobody else can find/use it.
5320 * the caller MUST hold group/inode locks.
5321 * TODO: optimize the case when there are no in-core structures yet
5322 */
5323static noinline_for_stack void
5324ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5325			struct ext4_prealloc_space *pa)
5326{
5327	struct super_block *sb = e4b->bd_sb;
5328	struct ext4_sb_info *sbi = EXT4_SB(sb);
5329	unsigned int end;
5330	unsigned int next;
5331	ext4_group_t group;
5332	ext4_grpblk_t bit;
5333	unsigned long long grp_blk_start;
5334	int free = 0;
5335
5336	BUG_ON(pa->pa_deleted == 0);
5337	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5338	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5339	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5340	end = bit + pa->pa_len;
5341
5342	while (bit < end) {
5343		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5344		if (bit >= end)
5345			break;
5346		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5347		mb_debug(sb, "free preallocated %u/%u in group %u\n",
5348			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5349			 (unsigned) next - bit, (unsigned) group);
5350		free += next - bit;
5351
5352		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5353		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5354						    EXT4_C2B(sbi, bit)),
5355					       next - bit);
5356		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5357		bit = next + 1;
5358	}
5359	if (free != pa->pa_free) {
5360		ext4_msg(e4b->bd_sb, KERN_CRIT,
5361			 "pa %p: logic %lu, phys. %lu, len %d",
5362			 pa, (unsigned long) pa->pa_lstart,
5363			 (unsigned long) pa->pa_pstart,
5364			 pa->pa_len);
5365		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5366					free, pa->pa_free);
5367		/*
5368		 * pa is already deleted so we use the value obtained
5369		 * from the bitmap and continue.
5370		 */
5371	}
5372	atomic_add(free, &sbi->s_mb_discarded);
5373}
5374
5375static noinline_for_stack void
5376ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5377				struct ext4_prealloc_space *pa)
5378{
5379	struct super_block *sb = e4b->bd_sb;
5380	ext4_group_t group;
5381	ext4_grpblk_t bit;
5382
5383	trace_ext4_mb_release_group_pa(sb, pa);
5384	BUG_ON(pa->pa_deleted == 0);
5385	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5386	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5387		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5388			     e4b->bd_group, group, pa->pa_pstart);
5389		return;
5390	}
5391	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5392	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5393	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5394}
5395
5396/*
5397 * releases all preallocations in given group
5398 *
5399 * first, we need to decide discard policy:
5400 * - when do we discard
5401 *   1) ENOSPC
5402 * - how many do we discard
5403 *   1) how many requested
5404 */
5405static noinline_for_stack int
5406ext4_mb_discard_group_preallocations(struct super_block *sb,
5407				     ext4_group_t group, int *busy)
5408{
5409	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5410	struct buffer_head *bitmap_bh = NULL;
5411	struct ext4_prealloc_space *pa, *tmp;
5412	LIST_HEAD(list);
5413	struct ext4_buddy e4b;
5414	struct ext4_inode_info *ei;
5415	int err;
5416	int free = 0;
5417
5418	if (!grp)
5419		return 0;
5420	mb_debug(sb, "discard preallocation for group %u\n", group);
5421	if (list_empty(&grp->bb_prealloc_list))
5422		goto out_dbg;
5423
5424	bitmap_bh = ext4_read_block_bitmap(sb, group);
5425	if (IS_ERR(bitmap_bh)) {
5426		err = PTR_ERR(bitmap_bh);
5427		ext4_error_err(sb, -err,
5428			       "Error %d reading block bitmap for %u",
5429			       err, group);
5430		goto out_dbg;
5431	}
5432
5433	err = ext4_mb_load_buddy(sb, group, &e4b);
5434	if (err) {
5435		ext4_warning(sb, "Error %d loading buddy information for %u",
5436			     err, group);
5437		put_bh(bitmap_bh);
5438		goto out_dbg;
5439	}
5440
5441	ext4_lock_group(sb, group);
5442	list_for_each_entry_safe(pa, tmp,
5443				&grp->bb_prealloc_list, pa_group_list) {
5444		spin_lock(&pa->pa_lock);
5445		if (atomic_read(&pa->pa_count)) {
5446			spin_unlock(&pa->pa_lock);
5447			*busy = 1;
5448			continue;
5449		}
5450		if (pa->pa_deleted) {
5451			spin_unlock(&pa->pa_lock);
5452			continue;
5453		}
5454
5455		/* seems this one can be freed ... */
5456		ext4_mb_mark_pa_deleted(sb, pa);
5457
5458		if (!free)
5459			this_cpu_inc(discard_pa_seq);
5460
5461		/* we can trust pa_free ... */
5462		free += pa->pa_free;
5463
5464		spin_unlock(&pa->pa_lock);
5465
5466		list_del(&pa->pa_group_list);
5467		list_add(&pa->u.pa_tmp_list, &list);
5468	}
5469
5470	/* now free all selected PAs */
5471	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5472
5473		/* remove from object (inode or locality group) */
5474		if (pa->pa_type == MB_GROUP_PA) {
5475			spin_lock(pa->pa_node_lock.lg_lock);
5476			list_del_rcu(&pa->pa_node.lg_list);
5477			spin_unlock(pa->pa_node_lock.lg_lock);
5478		} else {
5479			write_lock(pa->pa_node_lock.inode_lock);
5480			ei = EXT4_I(pa->pa_inode);
5481			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5482			write_unlock(pa->pa_node_lock.inode_lock);
5483		}
5484
5485		list_del(&pa->u.pa_tmp_list);
5486
5487		if (pa->pa_type == MB_GROUP_PA) {
5488			ext4_mb_release_group_pa(&e4b, pa);
5489			call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5490		} else {
5491			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5492			ext4_mb_pa_free(pa);
5493		}
5494	}
5495
5496	ext4_unlock_group(sb, group);
5497	ext4_mb_unload_buddy(&e4b);
5498	put_bh(bitmap_bh);
5499out_dbg:
5500	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5501		 free, group, grp->bb_free);
5502	return free;
5503}
5504
5505/*
5506 * releases all non-used preallocated blocks for given inode
5507 *
5508 * It's important to discard preallocations under i_data_sem
5509 * We don't want another block to be served from the prealloc
5510 * space when we are discarding the inode prealloc space.
5511 *
5512 * FIXME!! Make sure it is valid at all the call sites
5513 */
5514void ext4_discard_preallocations(struct inode *inode)
5515{
5516	struct ext4_inode_info *ei = EXT4_I(inode);
5517	struct super_block *sb = inode->i_sb;
5518	struct buffer_head *bitmap_bh = NULL;
5519	struct ext4_prealloc_space *pa, *tmp;
5520	ext4_group_t group = 0;
5521	LIST_HEAD(list);
5522	struct ext4_buddy e4b;
5523	struct rb_node *iter;
5524	int err;
5525
5526	if (!S_ISREG(inode->i_mode))
5527		return;
5528
5529	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5530		return;
5531
5532	mb_debug(sb, "discard preallocation for inode %lu\n",
5533		 inode->i_ino);
5534	trace_ext4_discard_preallocations(inode,
5535			atomic_read(&ei->i_prealloc_active));
5536
5537repeat:
5538	/* first, collect all pa's in the inode */
5539	write_lock(&ei->i_prealloc_lock);
5540	for (iter = rb_first(&ei->i_prealloc_node); iter;
5541	     iter = rb_next(iter)) {
5542		pa = rb_entry(iter, struct ext4_prealloc_space,
5543			      pa_node.inode_node);
5544		BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5545
5546		spin_lock(&pa->pa_lock);
5547		if (atomic_read(&pa->pa_count)) {
5548			/* this shouldn't happen often - nobody should
5549			 * use preallocation while we're discarding it */
5550			spin_unlock(&pa->pa_lock);
5551			write_unlock(&ei->i_prealloc_lock);
5552			ext4_msg(sb, KERN_ERR,
5553				 "uh-oh! used pa while discarding");
5554			WARN_ON(1);
5555			schedule_timeout_uninterruptible(HZ);
5556			goto repeat;
5557
5558		}
5559		if (pa->pa_deleted == 0) {
5560			ext4_mb_mark_pa_deleted(sb, pa);
5561			spin_unlock(&pa->pa_lock);
5562			rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5563			list_add(&pa->u.pa_tmp_list, &list);
5564			continue;
5565		}
5566
5567		/* someone is deleting pa right now */
5568		spin_unlock(&pa->pa_lock);
5569		write_unlock(&ei->i_prealloc_lock);
5570
5571		/* we have to wait here because pa_deleted
5572		 * doesn't mean pa is already unlinked from
5573		 * the list. as we might be called from
5574		 * ->clear_inode() the inode will get freed
5575		 * and concurrent thread which is unlinking
5576		 * pa from inode's list may access already
5577		 * freed memory, bad-bad-bad */
5578
5579		/* XXX: if this happens too often, we can
5580		 * add a flag to force wait only in case
5581		 * of ->clear_inode(), but not in case of
5582		 * regular truncate */
5583		schedule_timeout_uninterruptible(HZ);
5584		goto repeat;
5585	}
5586	write_unlock(&ei->i_prealloc_lock);
5587
5588	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5589		BUG_ON(pa->pa_type != MB_INODE_PA);
5590		group = ext4_get_group_number(sb, pa->pa_pstart);
5591
5592		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5593					     GFP_NOFS|__GFP_NOFAIL);
5594		if (err) {
5595			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5596				       err, group);
5597			continue;
5598		}
5599
5600		bitmap_bh = ext4_read_block_bitmap(sb, group);
5601		if (IS_ERR(bitmap_bh)) {
5602			err = PTR_ERR(bitmap_bh);
5603			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5604				       err, group);
5605			ext4_mb_unload_buddy(&e4b);
5606			continue;
5607		}
5608
5609		ext4_lock_group(sb, group);
5610		list_del(&pa->pa_group_list);
5611		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5612		ext4_unlock_group(sb, group);
5613
5614		ext4_mb_unload_buddy(&e4b);
5615		put_bh(bitmap_bh);
5616
5617		list_del(&pa->u.pa_tmp_list);
5618		ext4_mb_pa_free(pa);
5619	}
5620}
5621
5622static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5623{
5624	struct ext4_prealloc_space *pa;
5625
5626	BUG_ON(ext4_pspace_cachep == NULL);
5627	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5628	if (!pa)
5629		return -ENOMEM;
5630	atomic_set(&pa->pa_count, 1);
5631	ac->ac_pa = pa;
5632	return 0;
5633}
5634
5635static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5636{
5637	struct ext4_prealloc_space *pa = ac->ac_pa;
5638
5639	BUG_ON(!pa);
5640	ac->ac_pa = NULL;
5641	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5642	/*
5643	 * current function is only called due to an error or due to
5644	 * len of found blocks < len of requested blocks hence the PA has not
5645	 * been added to grp->bb_prealloc_list. So we don't need to lock it
5646	 */
5647	pa->pa_deleted = 1;
5648	ext4_mb_pa_free(pa);
5649}
5650
5651#ifdef CONFIG_EXT4_DEBUG
5652static inline void ext4_mb_show_pa(struct super_block *sb)
5653{
5654	ext4_group_t i, ngroups;
5655
5656	if (ext4_forced_shutdown(sb))
5657		return;
5658
5659	ngroups = ext4_get_groups_count(sb);
5660	mb_debug(sb, "groups: ");
5661	for (i = 0; i < ngroups; i++) {
5662		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5663		struct ext4_prealloc_space *pa;
5664		ext4_grpblk_t start;
5665		struct list_head *cur;
5666
5667		if (!grp)
5668			continue;
5669		ext4_lock_group(sb, i);
5670		list_for_each(cur, &grp->bb_prealloc_list) {
5671			pa = list_entry(cur, struct ext4_prealloc_space,
5672					pa_group_list);
5673			spin_lock(&pa->pa_lock);
5674			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5675						     NULL, &start);
5676			spin_unlock(&pa->pa_lock);
5677			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5678				 pa->pa_len);
5679		}
5680		ext4_unlock_group(sb, i);
5681		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5682			 grp->bb_fragments);
5683	}
5684}
5685
5686static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5687{
5688	struct super_block *sb = ac->ac_sb;
5689
5690	if (ext4_forced_shutdown(sb))
5691		return;
5692
5693	mb_debug(sb, "Can't allocate:"
5694			" Allocation context details:");
5695	mb_debug(sb, "status %u flags 0x%x",
5696			ac->ac_status, ac->ac_flags);
5697	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5698			"goal %lu/%lu/%lu@%lu, "
5699			"best %lu/%lu/%lu@%lu cr %d",
5700			(unsigned long)ac->ac_o_ex.fe_group,
5701			(unsigned long)ac->ac_o_ex.fe_start,
5702			(unsigned long)ac->ac_o_ex.fe_len,
5703			(unsigned long)ac->ac_o_ex.fe_logical,
5704			(unsigned long)ac->ac_g_ex.fe_group,
5705			(unsigned long)ac->ac_g_ex.fe_start,
5706			(unsigned long)ac->ac_g_ex.fe_len,
5707			(unsigned long)ac->ac_g_ex.fe_logical,
5708			(unsigned long)ac->ac_b_ex.fe_group,
5709			(unsigned long)ac->ac_b_ex.fe_start,
5710			(unsigned long)ac->ac_b_ex.fe_len,
5711			(unsigned long)ac->ac_b_ex.fe_logical,
5712			(int)ac->ac_criteria);
5713	mb_debug(sb, "%u found", ac->ac_found);
5714	mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa));
5715	if (ac->ac_pa)
5716		mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5717			 "group pa" : "inode pa");
5718	ext4_mb_show_pa(sb);
5719}
5720#else
5721static inline void ext4_mb_show_pa(struct super_block *sb)
5722{
5723}
5724static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5725{
5726	ext4_mb_show_pa(ac->ac_sb);
5727}
5728#endif
5729
5730/*
5731 * We use locality group preallocation for small size file. The size of the
5732 * file is determined by the current size or the resulting size after
5733 * allocation which ever is larger
5734 *
5735 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5736 */
5737static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5738{
5739	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5740	int bsbits = ac->ac_sb->s_blocksize_bits;
5741	loff_t size, isize;
5742	bool inode_pa_eligible, group_pa_eligible;
5743
5744	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5745		return;
5746
5747	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5748		return;
5749
5750	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5751	inode_pa_eligible = true;
5752	size = extent_logical_end(sbi, &ac->ac_o_ex);
5753	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5754		>> bsbits;
5755
5756	/* No point in using inode preallocation for closed files */
5757	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5758	    !inode_is_open_for_write(ac->ac_inode))
5759		inode_pa_eligible = false;
5760
5761	size = max(size, isize);
5762	/* Don't use group allocation for large files */
5763	if (size > sbi->s_mb_stream_request)
5764		group_pa_eligible = false;
5765
5766	if (!group_pa_eligible) {
5767		if (inode_pa_eligible)
5768			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5769		else
5770			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5771		return;
5772	}
5773
5774	BUG_ON(ac->ac_lg != NULL);
5775	/*
5776	 * locality group prealloc space are per cpu. The reason for having
5777	 * per cpu locality group is to reduce the contention between block
5778	 * request from multiple CPUs.
5779	 */
5780	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5781
5782	/* we're going to use group allocation */
5783	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5784
5785	/* serialize all allocations in the group */
5786	mutex_lock(&ac->ac_lg->lg_mutex);
5787}
5788
5789static noinline_for_stack void
5790ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5791				struct ext4_allocation_request *ar)
5792{
5793	struct super_block *sb = ar->inode->i_sb;
5794	struct ext4_sb_info *sbi = EXT4_SB(sb);
5795	struct ext4_super_block *es = sbi->s_es;
5796	ext4_group_t group;
5797	unsigned int len;
5798	ext4_fsblk_t goal;
5799	ext4_grpblk_t block;
5800
5801	/* we can't allocate > group size */
5802	len = ar->len;
5803
5804	/* just a dirty hack to filter too big requests  */
5805	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5806		len = EXT4_CLUSTERS_PER_GROUP(sb);
5807
5808	/* start searching from the goal */
5809	goal = ar->goal;
5810	if (goal < le32_to_cpu(es->s_first_data_block) ||
5811			goal >= ext4_blocks_count(es))
5812		goal = le32_to_cpu(es->s_first_data_block);
5813	ext4_get_group_no_and_offset(sb, goal, &group, &block);
5814
5815	/* set up allocation goals */
5816	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5817	ac->ac_status = AC_STATUS_CONTINUE;
5818	ac->ac_sb = sb;
5819	ac->ac_inode = ar->inode;
5820	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5821	ac->ac_o_ex.fe_group = group;
5822	ac->ac_o_ex.fe_start = block;
5823	ac->ac_o_ex.fe_len = len;
5824	ac->ac_g_ex = ac->ac_o_ex;
5825	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5826	ac->ac_flags = ar->flags;
5827
5828	/* we have to define context: we'll work with a file or
5829	 * locality group. this is a policy, actually */
5830	ext4_mb_group_or_file(ac);
5831
5832	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5833			"left: %u/%u, right %u/%u to %swritable\n",
5834			(unsigned) ar->len, (unsigned) ar->logical,
5835			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5836			(unsigned) ar->lleft, (unsigned) ar->pleft,
5837			(unsigned) ar->lright, (unsigned) ar->pright,
5838			inode_is_open_for_write(ar->inode) ? "" : "non-");
5839}
5840
5841static noinline_for_stack void
5842ext4_mb_discard_lg_preallocations(struct super_block *sb,
5843					struct ext4_locality_group *lg,
5844					int order, int total_entries)
5845{
5846	ext4_group_t group = 0;
5847	struct ext4_buddy e4b;
5848	LIST_HEAD(discard_list);
5849	struct ext4_prealloc_space *pa, *tmp;
5850
5851	mb_debug(sb, "discard locality group preallocation\n");
5852
5853	spin_lock(&lg->lg_prealloc_lock);
5854	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5855				pa_node.lg_list,
5856				lockdep_is_held(&lg->lg_prealloc_lock)) {
5857		spin_lock(&pa->pa_lock);
5858		if (atomic_read(&pa->pa_count)) {
5859			/*
5860			 * This is the pa that we just used
5861			 * for block allocation. So don't
5862			 * free that
5863			 */
5864			spin_unlock(&pa->pa_lock);
5865			continue;
5866		}
5867		if (pa->pa_deleted) {
5868			spin_unlock(&pa->pa_lock);
5869			continue;
5870		}
5871		/* only lg prealloc space */
5872		BUG_ON(pa->pa_type != MB_GROUP_PA);
5873
5874		/* seems this one can be freed ... */
5875		ext4_mb_mark_pa_deleted(sb, pa);
5876		spin_unlock(&pa->pa_lock);
5877
5878		list_del_rcu(&pa->pa_node.lg_list);
5879		list_add(&pa->u.pa_tmp_list, &discard_list);
5880
5881		total_entries--;
5882		if (total_entries <= 5) {
5883			/*
5884			 * we want to keep only 5 entries
5885			 * allowing it to grow to 8. This
5886			 * mak sure we don't call discard
5887			 * soon for this list.
5888			 */
5889			break;
5890		}
5891	}
5892	spin_unlock(&lg->lg_prealloc_lock);
5893
5894	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5895		int err;
5896
5897		group = ext4_get_group_number(sb, pa->pa_pstart);
5898		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5899					     GFP_NOFS|__GFP_NOFAIL);
5900		if (err) {
5901			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5902				       err, group);
5903			continue;
5904		}
5905		ext4_lock_group(sb, group);
5906		list_del(&pa->pa_group_list);
5907		ext4_mb_release_group_pa(&e4b, pa);
5908		ext4_unlock_group(sb, group);
5909
5910		ext4_mb_unload_buddy(&e4b);
5911		list_del(&pa->u.pa_tmp_list);
5912		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5913	}
5914}
5915
5916/*
5917 * We have incremented pa_count. So it cannot be freed at this
5918 * point. Also we hold lg_mutex. So no parallel allocation is
5919 * possible from this lg. That means pa_free cannot be updated.
5920 *
5921 * A parallel ext4_mb_discard_group_preallocations is possible.
5922 * which can cause the lg_prealloc_list to be updated.
5923 */
5924
5925static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5926{
5927	int order, added = 0, lg_prealloc_count = 1;
5928	struct super_block *sb = ac->ac_sb;
5929	struct ext4_locality_group *lg = ac->ac_lg;
5930	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5931
5932	order = fls(pa->pa_free) - 1;
5933	if (order > PREALLOC_TB_SIZE - 1)
5934		/* The max size of hash table is PREALLOC_TB_SIZE */
5935		order = PREALLOC_TB_SIZE - 1;
5936	/* Add the prealloc space to lg */
5937	spin_lock(&lg->lg_prealloc_lock);
5938	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5939				pa_node.lg_list,
5940				lockdep_is_held(&lg->lg_prealloc_lock)) {
5941		spin_lock(&tmp_pa->pa_lock);
5942		if (tmp_pa->pa_deleted) {
5943			spin_unlock(&tmp_pa->pa_lock);
5944			continue;
5945		}
5946		if (!added && pa->pa_free < tmp_pa->pa_free) {
5947			/* Add to the tail of the previous entry */
5948			list_add_tail_rcu(&pa->pa_node.lg_list,
5949						&tmp_pa->pa_node.lg_list);
5950			added = 1;
5951			/*
5952			 * we want to count the total
5953			 * number of entries in the list
5954			 */
5955		}
5956		spin_unlock(&tmp_pa->pa_lock);
5957		lg_prealloc_count++;
5958	}
5959	if (!added)
5960		list_add_tail_rcu(&pa->pa_node.lg_list,
5961					&lg->lg_prealloc_list[order]);
5962	spin_unlock(&lg->lg_prealloc_lock);
5963
5964	/* Now trim the list to be not more than 8 elements */
5965	if (lg_prealloc_count > 8)
5966		ext4_mb_discard_lg_preallocations(sb, lg,
5967						  order, lg_prealloc_count);
5968}
5969
5970/*
5971 * release all resource we used in allocation
5972 */
5973static void ext4_mb_release_context(struct ext4_allocation_context *ac)
5974{
5975	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5976	struct ext4_prealloc_space *pa = ac->ac_pa;
5977	if (pa) {
5978		if (pa->pa_type == MB_GROUP_PA) {
5979			/* see comment in ext4_mb_use_group_pa() */
5980			spin_lock(&pa->pa_lock);
5981			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5982			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5983			pa->pa_free -= ac->ac_b_ex.fe_len;
5984			pa->pa_len -= ac->ac_b_ex.fe_len;
5985			spin_unlock(&pa->pa_lock);
5986
5987			/*
5988			 * We want to add the pa to the right bucket.
5989			 * Remove it from the list and while adding
5990			 * make sure the list to which we are adding
5991			 * doesn't grow big.
5992			 */
5993			if (likely(pa->pa_free)) {
5994				spin_lock(pa->pa_node_lock.lg_lock);
5995				list_del_rcu(&pa->pa_node.lg_list);
5996				spin_unlock(pa->pa_node_lock.lg_lock);
5997				ext4_mb_add_n_trim(ac);
5998			}
5999		}
6000
6001		ext4_mb_put_pa(ac, ac->ac_sb, pa);
6002	}
6003	if (ac->ac_bitmap_folio)
6004		folio_put(ac->ac_bitmap_folio);
6005	if (ac->ac_buddy_folio)
6006		folio_put(ac->ac_buddy_folio);
6007	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
6008		mutex_unlock(&ac->ac_lg->lg_mutex);
6009	ext4_mb_collect_stats(ac);
6010}
6011
6012static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
6013{
6014	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
6015	int ret;
6016	int freed = 0, busy = 0;
6017	int retry = 0;
6018
6019	trace_ext4_mb_discard_preallocations(sb, needed);
6020
6021	if (needed == 0)
6022		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
6023 repeat:
6024	for (i = 0; i < ngroups && needed > 0; i++) {
6025		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
6026		freed += ret;
6027		needed -= ret;
6028		cond_resched();
6029	}
6030
6031	if (needed > 0 && busy && ++retry < 3) {
6032		busy = 0;
6033		goto repeat;
6034	}
6035
6036	return freed;
6037}
6038
6039static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6040			struct ext4_allocation_context *ac, u64 *seq)
6041{
6042	int freed;
6043	u64 seq_retry = 0;
6044	bool ret = false;
6045
6046	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6047	if (freed) {
6048		ret = true;
6049		goto out_dbg;
6050	}
6051	seq_retry = ext4_get_discard_pa_seq_sum();
6052	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6053		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6054		*seq = seq_retry;
6055		ret = true;
6056	}
6057
6058out_dbg:
6059	mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret));
6060	return ret;
6061}
6062
6063/*
6064 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6065 * linearly starting at the goal block and also excludes the blocks which
6066 * are going to be in use after fast commit replay.
6067 */
6068static ext4_fsblk_t
6069ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6070{
6071	struct buffer_head *bitmap_bh;
6072	struct super_block *sb = ar->inode->i_sb;
6073	struct ext4_sb_info *sbi = EXT4_SB(sb);
6074	ext4_group_t group, nr;
6075	ext4_grpblk_t blkoff;
6076	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6077	ext4_grpblk_t i = 0;
6078	ext4_fsblk_t goal, block;
6079	struct ext4_super_block *es = sbi->s_es;
6080
6081	goal = ar->goal;
6082	if (goal < le32_to_cpu(es->s_first_data_block) ||
6083			goal >= ext4_blocks_count(es))
6084		goal = le32_to_cpu(es->s_first_data_block);
6085
6086	ar->len = 0;
6087	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6088	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6089		bitmap_bh = ext4_read_block_bitmap(sb, group);
6090		if (IS_ERR(bitmap_bh)) {
6091			*errp = PTR_ERR(bitmap_bh);
6092			pr_warn("Failed to read block bitmap\n");
6093			return 0;
6094		}
6095
6096		while (1) {
6097			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6098						blkoff);
6099			if (i >= max)
6100				break;
6101			if (ext4_fc_replay_check_excluded(sb,
6102				ext4_group_first_block_no(sb, group) +
6103				EXT4_C2B(sbi, i))) {
6104				blkoff = i + 1;
6105			} else
6106				break;
6107		}
6108		brelse(bitmap_bh);
6109		if (i < max)
6110			break;
6111
6112		if (++group >= ext4_get_groups_count(sb))
6113			group = 0;
6114
6115		blkoff = 0;
6116	}
6117
6118	if (i >= max) {
6119		*errp = -ENOSPC;
6120		return 0;
6121	}
6122
6123	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6124	ext4_mb_mark_bb(sb, block, 1, true);
6125	ar->len = 1;
6126
6127	*errp = 0;
6128	return block;
6129}
6130
6131/*
6132 * Main entry point into mballoc to allocate blocks
6133 * it tries to use preallocation first, then falls back
6134 * to usual allocation
6135 */
6136ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6137				struct ext4_allocation_request *ar, int *errp)
6138{
6139	struct ext4_allocation_context *ac = NULL;
6140	struct ext4_sb_info *sbi;
6141	struct super_block *sb;
6142	ext4_fsblk_t block = 0;
6143	unsigned int inquota = 0;
6144	unsigned int reserv_clstrs = 0;
6145	int retries = 0;
6146	u64 seq;
6147
6148	might_sleep();
6149	sb = ar->inode->i_sb;
6150	sbi = EXT4_SB(sb);
6151
6152	trace_ext4_request_blocks(ar);
6153	if (sbi->s_mount_state & EXT4_FC_REPLAY)
6154		return ext4_mb_new_blocks_simple(ar, errp);
6155
6156	/* Allow to use superuser reservation for quota file */
6157	if (ext4_is_quota_file(ar->inode))
6158		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6159
6160	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6161		/* Without delayed allocation we need to verify
6162		 * there is enough free blocks to do block allocation
6163		 * and verify allocation doesn't exceed the quota limits.
6164		 */
6165		while (ar->len &&
6166			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6167
6168			/* let others to free the space */
6169			cond_resched();
6170			ar->len = ar->len >> 1;
6171		}
6172		if (!ar->len) {
6173			ext4_mb_show_pa(sb);
6174			*errp = -ENOSPC;
6175			return 0;
6176		}
6177		reserv_clstrs = ar->len;
6178		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6179			dquot_alloc_block_nofail(ar->inode,
6180						 EXT4_C2B(sbi, ar->len));
6181		} else {
6182			while (ar->len &&
6183				dquot_alloc_block(ar->inode,
6184						  EXT4_C2B(sbi, ar->len))) {
6185
6186				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6187				ar->len--;
6188			}
6189		}
6190		inquota = ar->len;
6191		if (ar->len == 0) {
6192			*errp = -EDQUOT;
6193			goto out;
6194		}
6195	}
6196
6197	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6198	if (!ac) {
6199		ar->len = 0;
6200		*errp = -ENOMEM;
6201		goto out;
6202	}
6203
6204	ext4_mb_initialize_context(ac, ar);
6205
6206	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6207	seq = this_cpu_read(discard_pa_seq);
6208	if (!ext4_mb_use_preallocated(ac)) {
6209		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6210		ext4_mb_normalize_request(ac, ar);
6211
6212		*errp = ext4_mb_pa_alloc(ac);
6213		if (*errp)
6214			goto errout;
6215repeat:
6216		/* allocate space in core */
6217		*errp = ext4_mb_regular_allocator(ac);
6218		/*
6219		 * pa allocated above is added to grp->bb_prealloc_list only
6220		 * when we were able to allocate some block i.e. when
6221		 * ac->ac_status == AC_STATUS_FOUND.
6222		 * And error from above mean ac->ac_status != AC_STATUS_FOUND
6223		 * So we have to free this pa here itself.
6224		 */
6225		if (*errp) {
6226			ext4_mb_pa_put_free(ac);
6227			ext4_discard_allocated_blocks(ac);
6228			goto errout;
6229		}
6230		if (ac->ac_status == AC_STATUS_FOUND &&
6231			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6232			ext4_mb_pa_put_free(ac);
6233	}
6234	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6235		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6236		if (*errp) {
6237			ext4_discard_allocated_blocks(ac);
6238			goto errout;
6239		} else {
6240			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6241			ar->len = ac->ac_b_ex.fe_len;
6242		}
6243	} else {
6244		if (++retries < 3 &&
6245		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6246			goto repeat;
6247		/*
6248		 * If block allocation fails then the pa allocated above
6249		 * needs to be freed here itself.
6250		 */
6251		ext4_mb_pa_put_free(ac);
6252		*errp = -ENOSPC;
6253	}
6254
6255	if (*errp) {
6256errout:
6257		ac->ac_b_ex.fe_len = 0;
6258		ar->len = 0;
6259		ext4_mb_show_ac(ac);
6260	}
6261	ext4_mb_release_context(ac);
6262	kmem_cache_free(ext4_ac_cachep, ac);
6263out:
6264	if (inquota && ar->len < inquota)
6265		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6266	if (!ar->len) {
6267		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6268			/* release all the reserved blocks if non delalloc */
6269			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6270						reserv_clstrs);
6271	}
6272
6273	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6274
6275	return block;
6276}
6277
6278/*
6279 * We can merge two free data extents only if the physical blocks
6280 * are contiguous, AND the extents were freed by the same transaction,
6281 * AND the blocks are associated with the same group.
6282 */
6283static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6284					struct ext4_free_data *entry,
6285					struct ext4_free_data *new_entry,
6286					struct rb_root *entry_rb_root)
6287{
6288	if ((entry->efd_tid != new_entry->efd_tid) ||
6289	    (entry->efd_group != new_entry->efd_group))
6290		return;
6291	if (entry->efd_start_cluster + entry->efd_count ==
6292	    new_entry->efd_start_cluster) {
6293		new_entry->efd_start_cluster = entry->efd_start_cluster;
6294		new_entry->efd_count += entry->efd_count;
6295	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6296		   entry->efd_start_cluster) {
6297		new_entry->efd_count += entry->efd_count;
6298	} else
6299		return;
6300	spin_lock(&sbi->s_md_lock);
6301	list_del(&entry->efd_list);
6302	spin_unlock(&sbi->s_md_lock);
6303	rb_erase(&entry->efd_node, entry_rb_root);
6304	kmem_cache_free(ext4_free_data_cachep, entry);
6305}
6306
6307static noinline_for_stack void
6308ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6309		      struct ext4_free_data *new_entry)
6310{
6311	ext4_group_t group = e4b->bd_group;
6312	ext4_grpblk_t cluster;
6313	ext4_grpblk_t clusters = new_entry->efd_count;
6314	struct ext4_free_data *entry;
6315	struct ext4_group_info *db = e4b->bd_info;
6316	struct super_block *sb = e4b->bd_sb;
6317	struct ext4_sb_info *sbi = EXT4_SB(sb);
6318	struct rb_node **n = &db->bb_free_root.rb_node, *node;
6319	struct rb_node *parent = NULL, *new_node;
6320
6321	BUG_ON(!ext4_handle_valid(handle));
6322	BUG_ON(e4b->bd_bitmap_folio == NULL);
6323	BUG_ON(e4b->bd_buddy_folio == NULL);
6324
6325	new_node = &new_entry->efd_node;
6326	cluster = new_entry->efd_start_cluster;
6327
6328	if (!*n) {
6329		/* first free block exent. We need to
6330		   protect buddy cache from being freed,
6331		 * otherwise we'll refresh it from
6332		 * on-disk bitmap and lose not-yet-available
6333		 * blocks */
6334		folio_get(e4b->bd_buddy_folio);
6335		folio_get(e4b->bd_bitmap_folio);
6336	}
6337	while (*n) {
6338		parent = *n;
6339		entry = rb_entry(parent, struct ext4_free_data, efd_node);
6340		if (cluster < entry->efd_start_cluster)
6341			n = &(*n)->rb_left;
6342		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6343			n = &(*n)->rb_right;
6344		else {
6345			ext4_grp_locked_error(sb, group, 0,
6346				ext4_group_first_block_no(sb, group) +
6347				EXT4_C2B(sbi, cluster),
6348				"Block already on to-be-freed list");
6349			kmem_cache_free(ext4_free_data_cachep, new_entry);
6350			return;
6351		}
6352	}
6353
6354	rb_link_node(new_node, parent, n);
6355	rb_insert_color(new_node, &db->bb_free_root);
6356
6357	/* Now try to see the extent can be merged to left and right */
6358	node = rb_prev(new_node);
6359	if (node) {
6360		entry = rb_entry(node, struct ext4_free_data, efd_node);
6361		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6362					    &(db->bb_free_root));
6363	}
6364
6365	node = rb_next(new_node);
6366	if (node) {
6367		entry = rb_entry(node, struct ext4_free_data, efd_node);
6368		ext4_try_merge_freed_extent(sbi, entry, new_entry,
6369					    &(db->bb_free_root));
6370	}
6371
6372	spin_lock(&sbi->s_md_lock);
6373	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6374	sbi->s_mb_free_pending += clusters;
6375	spin_unlock(&sbi->s_md_lock);
6376}
6377
6378static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6379					unsigned long count)
6380{
6381	struct super_block *sb = inode->i_sb;
6382	ext4_group_t group;
6383	ext4_grpblk_t blkoff;
6384
6385	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6386	ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
6387			     EXT4_MB_BITMAP_MARKED_CHECK |
6388			     EXT4_MB_SYNC_UPDATE,
6389			     NULL);
6390}
6391
6392/**
6393 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6394 *			Used by ext4_free_blocks()
6395 * @handle:		handle for this transaction
6396 * @inode:		inode
6397 * @block:		starting physical block to be freed
6398 * @count:		number of blocks to be freed
6399 * @flags:		flags used by ext4_free_blocks
6400 */
6401static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6402			       ext4_fsblk_t block, unsigned long count,
6403			       int flags)
6404{
6405	struct super_block *sb = inode->i_sb;
6406	struct ext4_group_info *grp;
6407	unsigned int overflow;
6408	ext4_grpblk_t bit;
6409	ext4_group_t block_group;
6410	struct ext4_sb_info *sbi;
6411	struct ext4_buddy e4b;
6412	unsigned int count_clusters;
6413	int err = 0;
6414	int mark_flags = 0;
6415	ext4_grpblk_t changed;
6416
6417	sbi = EXT4_SB(sb);
6418
6419	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6420	    !ext4_inode_block_valid(inode, block, count)) {
6421		ext4_error(sb, "Freeing blocks in system zone - "
6422			   "Block = %llu, count = %lu", block, count);
6423		/* err = 0. ext4_std_error should be a no op */
6424		goto error_out;
6425	}
6426	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6427
6428do_more:
6429	overflow = 0;
6430	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6431
6432	grp = ext4_get_group_info(sb, block_group);
6433	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6434		return;
6435
6436	/*
6437	 * Check to see if we are freeing blocks across a group
6438	 * boundary.
6439	 */
6440	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6441		overflow = EXT4_C2B(sbi, bit) + count -
6442			EXT4_BLOCKS_PER_GROUP(sb);
6443		count -= overflow;
6444		/* The range changed so it's no longer validated */
6445		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6446	}
6447	count_clusters = EXT4_NUM_B2C(sbi, count);
6448	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6449
6450	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6451	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6452				     GFP_NOFS|__GFP_NOFAIL);
6453	if (err)
6454		goto error_out;
6455
6456	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6457	    !ext4_inode_block_valid(inode, block, count)) {
6458		ext4_error(sb, "Freeing blocks in system zone - "
6459			   "Block = %llu, count = %lu", block, count);
6460		/* err = 0. ext4_std_error should be a no op */
6461		goto error_clean;
6462	}
6463
6464#ifdef AGGRESSIVE_CHECK
6465	mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
6466#endif
6467	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6468				   count_clusters, mark_flags, &changed);
6469
6470
6471	if (err && changed == 0)
6472		goto error_clean;
6473
6474#ifdef AGGRESSIVE_CHECK
6475	BUG_ON(changed != count_clusters);
6476#endif
6477
6478	/*
6479	 * We need to make sure we don't reuse the freed block until after the
6480	 * transaction is committed. We make an exception if the inode is to be
6481	 * written in writeback mode since writeback mode has weak data
6482	 * consistency guarantees.
6483	 */
6484	if (ext4_handle_valid(handle) &&
6485	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6486	     !ext4_should_writeback_data(inode))) {
6487		struct ext4_free_data *new_entry;
6488		/*
6489		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6490		 * to fail.
6491		 */
6492		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6493				GFP_NOFS|__GFP_NOFAIL);
6494		new_entry->efd_start_cluster = bit;
6495		new_entry->efd_group = block_group;
6496		new_entry->efd_count = count_clusters;
6497		new_entry->efd_tid = handle->h_transaction->t_tid;
6498
6499		ext4_lock_group(sb, block_group);
6500		ext4_mb_free_metadata(handle, &e4b, new_entry);
6501	} else {
6502		if (test_opt(sb, DISCARD)) {
6503			err = ext4_issue_discard(sb, block_group, bit,
6504						 count_clusters);
6505			/*
6506			 * Ignore EOPNOTSUPP error. This is consistent with
6507			 * what happens when using journal.
6508			 */
6509			if (err == -EOPNOTSUPP)
6510				err = 0;
6511			if (err)
6512				ext4_msg(sb, KERN_WARNING, "discard request in"
6513					 " group:%u block:%d count:%lu failed"
6514					 " with %d", block_group, bit, count,
6515					 err);
6516		}
6517
6518		EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6519
6520		ext4_lock_group(sb, block_group);
6521		mb_free_blocks(inode, &e4b, bit, count_clusters);
6522	}
6523
6524	ext4_unlock_group(sb, block_group);
6525
6526	/*
6527	 * on a bigalloc file system, defer the s_freeclusters_counter
6528	 * update to the caller (ext4_remove_space and friends) so they
6529	 * can determine if a cluster freed here should be rereserved
6530	 */
6531	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6532		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6533			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6534		percpu_counter_add(&sbi->s_freeclusters_counter,
6535				   count_clusters);
6536	}
6537
6538	if (overflow && !err) {
6539		block += count;
6540		count = overflow;
6541		ext4_mb_unload_buddy(&e4b);
6542		/* The range changed so it's no longer validated */
6543		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6544		goto do_more;
6545	}
6546
6547error_clean:
6548	ext4_mb_unload_buddy(&e4b);
6549error_out:
6550	ext4_std_error(sb, err);
6551}
6552
6553/**
6554 * ext4_free_blocks() -- Free given blocks and update quota
6555 * @handle:		handle for this transaction
6556 * @inode:		inode
6557 * @bh:			optional buffer of the block to be freed
6558 * @block:		starting physical block to be freed
6559 * @count:		number of blocks to be freed
6560 * @flags:		flags used by ext4_free_blocks
6561 */
6562void ext4_free_blocks(handle_t *handle, struct inode *inode,
6563		      struct buffer_head *bh, ext4_fsblk_t block,
6564		      unsigned long count, int flags)
6565{
6566	struct super_block *sb = inode->i_sb;
6567	unsigned int overflow;
6568	struct ext4_sb_info *sbi;
6569
6570	sbi = EXT4_SB(sb);
6571
6572	if (bh) {
6573		if (block)
6574			BUG_ON(block != bh->b_blocknr);
6575		else
6576			block = bh->b_blocknr;
6577	}
6578
6579	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6580		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6581		return;
6582	}
6583
6584	might_sleep();
6585
6586	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6587	    !ext4_inode_block_valid(inode, block, count)) {
6588		ext4_error(sb, "Freeing blocks not in datazone - "
6589			   "block = %llu, count = %lu", block, count);
6590		return;
6591	}
6592	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6593
6594	ext4_debug("freeing block %llu\n", block);
6595	trace_ext4_free_blocks(inode, block, count, flags);
6596
6597	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6598		BUG_ON(count > 1);
6599
6600		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6601			    inode, bh, block);
6602	}
6603
6604	/*
6605	 * If the extent to be freed does not begin on a cluster
6606	 * boundary, we need to deal with partial clusters at the
6607	 * beginning and end of the extent.  Normally we will free
6608	 * blocks at the beginning or the end unless we are explicitly
6609	 * requested to avoid doing so.
6610	 */
6611	overflow = EXT4_PBLK_COFF(sbi, block);
6612	if (overflow) {
6613		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6614			overflow = sbi->s_cluster_ratio - overflow;
6615			block += overflow;
6616			if (count > overflow)
6617				count -= overflow;
6618			else
6619				return;
6620		} else {
6621			block -= overflow;
6622			count += overflow;
6623		}
6624		/* The range changed so it's no longer validated */
6625		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6626	}
6627	overflow = EXT4_LBLK_COFF(sbi, count);
6628	if (overflow) {
6629		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6630			if (count > overflow)
6631				count -= overflow;
6632			else
6633				return;
6634		} else
6635			count += sbi->s_cluster_ratio - overflow;
6636		/* The range changed so it's no longer validated */
6637		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6638	}
6639
6640	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6641		int i;
6642		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6643
6644		for (i = 0; i < count; i++) {
6645			cond_resched();
6646			if (is_metadata)
6647				bh = sb_find_get_block(inode->i_sb, block + i);
6648			ext4_forget(handle, is_metadata, inode, bh, block + i);
6649		}
6650	}
6651
6652	ext4_mb_clear_bb(handle, inode, block, count, flags);
6653}
6654
6655/**
6656 * ext4_group_add_blocks() -- Add given blocks to an existing group
6657 * @handle:			handle to this transaction
6658 * @sb:				super block
6659 * @block:			start physical block to add to the block group
6660 * @count:			number of blocks to free
6661 *
6662 * This marks the blocks as free in the bitmap and buddy.
6663 */
6664int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6665			 ext4_fsblk_t block, unsigned long count)
6666{
6667	ext4_group_t block_group;
6668	ext4_grpblk_t bit;
6669	struct ext4_sb_info *sbi = EXT4_SB(sb);
6670	struct ext4_buddy e4b;
6671	int err = 0;
6672	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6673	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6674	unsigned long cluster_count = last_cluster - first_cluster + 1;
6675	ext4_grpblk_t changed;
6676
6677	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6678
6679	if (cluster_count == 0)
6680		return 0;
6681
6682	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6683	/*
6684	 * Check to see if we are freeing blocks across a group
6685	 * boundary.
6686	 */
6687	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6688		ext4_warning(sb, "too many blocks added to group %u",
6689			     block_group);
6690		err = -EINVAL;
6691		goto error_out;
6692	}
6693
6694	err = ext4_mb_load_buddy(sb, block_group, &e4b);
6695	if (err)
6696		goto error_out;
6697
6698	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6699		ext4_error(sb, "Adding blocks in system zones - "
6700			   "Block = %llu, count = %lu",
6701			   block, count);
6702		err = -EINVAL;
6703		goto error_clean;
6704	}
6705
6706	err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6707				   cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6708				   &changed);
6709	if (err && changed == 0)
6710		goto error_clean;
6711
6712	if (changed != cluster_count)
6713		ext4_error(sb, "bit already cleared in group %u", block_group);
6714
6715	ext4_lock_group(sb, block_group);
6716	mb_free_blocks(NULL, &e4b, bit, cluster_count);
6717	ext4_unlock_group(sb, block_group);
6718	percpu_counter_add(&sbi->s_freeclusters_counter,
6719			   changed);
6720
6721error_clean:
6722	ext4_mb_unload_buddy(&e4b);
6723error_out:
6724	ext4_std_error(sb, err);
6725	return err;
6726}
6727
6728/**
6729 * ext4_trim_extent -- function to TRIM one single free extent in the group
6730 * @sb:		super block for the file system
6731 * @start:	starting block of the free extent in the alloc. group
6732 * @count:	number of blocks to TRIM
6733 * @e4b:	ext4 buddy for the group
6734 *
6735 * Trim "count" blocks starting at "start" in the "group". To assure that no
6736 * one will allocate those blocks, mark it as used in buddy bitmap. This must
6737 * be called with under the group lock.
6738 */
6739static int ext4_trim_extent(struct super_block *sb,
6740		int start, int count, struct ext4_buddy *e4b)
6741__releases(bitlock)
6742__acquires(bitlock)
6743{
6744	struct ext4_free_extent ex;
6745	ext4_group_t group = e4b->bd_group;
6746	int ret = 0;
6747
6748	trace_ext4_trim_extent(sb, group, start, count);
6749
6750	assert_spin_locked(ext4_group_lock_ptr(sb, group));
6751
6752	ex.fe_start = start;
6753	ex.fe_group = group;
6754	ex.fe_len = count;
6755
6756	/*
6757	 * Mark blocks used, so no one can reuse them while
6758	 * being trimmed.
6759	 */
6760	mb_mark_used(e4b, &ex);
6761	ext4_unlock_group(sb, group);
6762	ret = ext4_issue_discard(sb, group, start, count);
6763	ext4_lock_group(sb, group);
6764	mb_free_blocks(NULL, e4b, start, ex.fe_len);
6765	return ret;
6766}
6767
6768static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6769					   ext4_group_t grp)
6770{
6771	unsigned long nr_clusters_in_group;
6772
6773	if (grp < (ext4_get_groups_count(sb) - 1))
6774		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6775	else
6776		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6777					ext4_group_first_block_no(sb, grp))
6778				       >> EXT4_CLUSTER_BITS(sb);
6779
6780	return nr_clusters_in_group - 1;
6781}
6782
6783static bool ext4_trim_interrupted(void)
6784{
6785	return fatal_signal_pending(current) || freezing(current);
6786}
6787
6788static int ext4_try_to_trim_range(struct super_block *sb,
6789		struct ext4_buddy *e4b, ext4_grpblk_t start,
6790		ext4_grpblk_t max, ext4_grpblk_t minblocks)
6791__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6792__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6793{
6794	ext4_grpblk_t next, count, free_count, last, origin_start;
6795	bool set_trimmed = false;
6796	void *bitmap;
6797
6798	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
6799		return 0;
6800
6801	last = ext4_last_grp_cluster(sb, e4b->bd_group);
6802	bitmap = e4b->bd_bitmap;
6803	if (start == 0 && max >= last)
6804		set_trimmed = true;
6805	origin_start = start;
6806	start = max(e4b->bd_info->bb_first_free, start);
6807	count = 0;
6808	free_count = 0;
6809
6810	while (start <= max) {
6811		start = mb_find_next_zero_bit(bitmap, max + 1, start);
6812		if (start > max)
6813			break;
6814
6815		next = mb_find_next_bit(bitmap, last + 1, start);
6816		if (origin_start == 0 && next >= last)
6817			set_trimmed = true;
6818
6819		if ((next - start) >= minblocks) {
6820			int ret = ext4_trim_extent(sb, start, next - start, e4b);
6821
6822			if (ret && ret != -EOPNOTSUPP)
6823				return count;
6824			count += next - start;
6825		}
6826		free_count += next - start;
6827		start = next + 1;
6828
6829		if (ext4_trim_interrupted())
6830			return count;
6831
6832		if (need_resched()) {
6833			ext4_unlock_group(sb, e4b->bd_group);
6834			cond_resched();
6835			ext4_lock_group(sb, e4b->bd_group);
6836		}
6837
6838		if ((e4b->bd_info->bb_free - free_count) < minblocks)
6839			break;
6840	}
6841
6842	if (set_trimmed)
6843		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6844
6845	return count;
6846}
6847
6848/**
6849 * ext4_trim_all_free -- function to trim all free space in alloc. group
6850 * @sb:			super block for file system
6851 * @group:		group to be trimmed
6852 * @start:		first group block to examine
6853 * @max:		last group block to examine
6854 * @minblocks:		minimum extent block count
6855 *
6856 * ext4_trim_all_free walks through group's block bitmap searching for free
6857 * extents. When the free extent is found, mark it as used in group buddy
6858 * bitmap. Then issue a TRIM command on this extent and free the extent in
6859 * the group buddy bitmap.
6860 */
6861static ext4_grpblk_t
6862ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6863		   ext4_grpblk_t start, ext4_grpblk_t max,
6864		   ext4_grpblk_t minblocks)
6865{
6866	struct ext4_buddy e4b;
6867	int ret;
6868
6869	trace_ext4_trim_all_free(sb, group, start, max);
6870
6871	ret = ext4_mb_load_buddy(sb, group, &e4b);
6872	if (ret) {
6873		ext4_warning(sb, "Error %d loading buddy information for %u",
6874			     ret, group);
6875		return ret;
6876	}
6877
6878	ext4_lock_group(sb, group);
6879
6880	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6881	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6882		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6883	else
6884		ret = 0;
6885
6886	ext4_unlock_group(sb, group);
6887	ext4_mb_unload_buddy(&e4b);
6888
6889	ext4_debug("trimmed %d blocks in the group %d\n",
6890		ret, group);
6891
6892	return ret;
6893}
6894
6895/**
6896 * ext4_trim_fs() -- trim ioctl handle function
6897 * @sb:			superblock for filesystem
6898 * @range:		fstrim_range structure
6899 *
6900 * start:	First Byte to trim
6901 * len:		number of Bytes to trim from start
6902 * minlen:	minimum extent length in Bytes
6903 * ext4_trim_fs goes through all allocation groups containing Bytes from
6904 * start to start+len. For each such a group ext4_trim_all_free function
6905 * is invoked to trim all free space.
6906 */
6907int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6908{
6909	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6910	struct ext4_group_info *grp;
6911	ext4_group_t group, first_group, last_group;
6912	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6913	uint64_t start, end, minlen, trimmed = 0;
6914	ext4_fsblk_t first_data_blk =
6915			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6916	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6917	int ret = 0;
6918
6919	start = range->start >> sb->s_blocksize_bits;
6920	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6921	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6922			      range->minlen >> sb->s_blocksize_bits);
6923
6924	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6925	    start >= max_blks ||
6926	    range->len < sb->s_blocksize)
6927		return -EINVAL;
6928	/* No point to try to trim less than discard granularity */
6929	if (range->minlen < discard_granularity) {
6930		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6931				discard_granularity >> sb->s_blocksize_bits);
6932		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6933			goto out;
6934	}
6935	if (end >= max_blks - 1)
6936		end = max_blks - 1;
6937	if (end <= first_data_blk)
6938		goto out;
6939	if (start < first_data_blk)
6940		start = first_data_blk;
6941
6942	/* Determine first and last group to examine based on start and end */
6943	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6944				     &first_group, &first_cluster);
6945	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6946				     &last_group, &last_cluster);
6947
6948	/* end now represents the last cluster to discard in this group */
6949	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6950
6951	for (group = first_group; group <= last_group; group++) {
6952		if (ext4_trim_interrupted())
6953			break;
6954		grp = ext4_get_group_info(sb, group);
6955		if (!grp)
6956			continue;
6957		/* We only do this if the grp has never been initialized */
6958		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6959			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6960			if (ret)
6961				break;
6962		}
6963
6964		/*
6965		 * For all the groups except the last one, last cluster will
6966		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6967		 * change it for the last group, note that last_cluster is
6968		 * already computed earlier by ext4_get_group_no_and_offset()
6969		 */
6970		if (group == last_group)
6971			end = last_cluster;
6972		if (grp->bb_free >= minlen) {
6973			cnt = ext4_trim_all_free(sb, group, first_cluster,
6974						 end, minlen);
6975			if (cnt < 0) {
6976				ret = cnt;
6977				break;
6978			}
6979			trimmed += cnt;
6980		}
6981
6982		/*
6983		 * For every group except the first one, we are sure
6984		 * that the first cluster to discard will be cluster #0.
6985		 */
6986		first_cluster = 0;
6987	}
6988
6989	if (!ret)
6990		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6991
6992out:
6993	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6994	return ret;
6995}
6996
6997/* Iterate all the free extents in the group. */
6998int
6999ext4_mballoc_query_range(
7000	struct super_block		*sb,
7001	ext4_group_t			group,
7002	ext4_grpblk_t			first,
7003	ext4_grpblk_t			end,
7004	ext4_mballoc_query_range_fn	meta_formatter,
7005	ext4_mballoc_query_range_fn	formatter,
7006	void				*priv)
7007{
7008	void				*bitmap;
7009	ext4_grpblk_t			start, next;
7010	struct ext4_buddy		e4b;
7011	int				error;
7012
7013	error = ext4_mb_load_buddy(sb, group, &e4b);
7014	if (error)
7015		return error;
7016	bitmap = e4b.bd_bitmap;
7017
7018	ext4_lock_group(sb, group);
7019
7020	start = max(e4b.bd_info->bb_first_free, first);
7021	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
7022		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
7023	if (meta_formatter && start != first) {
7024		if (start > end)
7025			start = end;
7026		ext4_unlock_group(sb, group);
7027		error = meta_formatter(sb, group, first, start - first,
7028				       priv);
7029		if (error)
7030			goto out_unload;
7031		ext4_lock_group(sb, group);
7032	}
7033	while (start <= end) {
7034		start = mb_find_next_zero_bit(bitmap, end + 1, start);
7035		if (start > end)
7036			break;
7037		next = mb_find_next_bit(bitmap, end + 1, start);
7038
7039		ext4_unlock_group(sb, group);
7040		error = formatter(sb, group, start, next - start, priv);
7041		if (error)
7042			goto out_unload;
7043		ext4_lock_group(sb, group);
7044
7045		start = next + 1;
7046	}
7047
7048	ext4_unlock_group(sb, group);
7049out_unload:
7050	ext4_mb_unload_buddy(&e4b);
7051
7052	return error;
7053}
7054
7055#ifdef CONFIG_EXT4_KUNIT_TESTS
7056#include "mballoc-test.c"
7057#endif