   1/*
   2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
   3 * Written by Alex Tomas <alex@clusterfs.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
   14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
  17 */
  18
  19
  20/*
  21 * mballoc.c contains the multiblocks allocation routines
  22 */
  23
  24#include "mballoc.h"
  25#include <linux/debugfs.h>
  26#include <linux/slab.h>
  27#include <trace/events/ext4.h>
  28
  29/*
  30 * MUSTDO:
  31 *   - test ext4_ext_search_left() and ext4_ext_search_right()
  32 *   - search for metadata in few groups
  33 *
  34 * TODO v4:
  35 *   - normalization should take into account whether file is still open
  36 *   - discard preallocations if no free space left (policy?)
  37 *   - don't normalize tails
  38 *   - quota
  39 *   - reservation for superuser
  40 *
  41 * TODO v3:
  42 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
  43 *   - track min/max extents in each group for better group selection
  44 *   - mb_mark_used() may allocate chunk right after splitting buddy
  45 *   - tree of groups sorted by number of free blocks
  46 *   - error handling
  47 */
  48
  49/*
   50 * The allocation request involves a request for multiple blocks
   51 * near the goal (block) value specified.
  52 *
   53 * During the initialization phase of the allocator we decide to use
   54 * group preallocation or inode preallocation depending on the size of
   55 * the file. The size of the file could be the resulting file size we
   56 * would have after allocation, or the current file size, whichever
   57 * is larger. If the size is less than sbi->s_mb_stream_request we
   58 * select group preallocation. The default value of
  59 * s_mb_stream_request is 16 blocks. This can also be tuned via
  60 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
  61 * terms of number of blocks.
  62 *
   63 * The main motivation for having small files use group preallocation is to
   64 * ensure that we keep small files close together on the disk.
  65 *
   66 * In the first stage the allocator looks at the inode prealloc list,
   67 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
  68 * spaces for this particular inode. The inode prealloc space is
  69 * represented as:
  70 *
  71 * pa_lstart -> the logical start block for this prealloc space
  72 * pa_pstart -> the physical start block for this prealloc space
  73 * pa_len    -> length for this prealloc space
  74 * pa_free   ->  free space available in this prealloc space
  75 *
   76 * The inode preallocation space is used by looking at the _logical_ start
   77 * block. Only if the logical file block falls within the range of a prealloc
   78 * space do we consume that particular prealloc space. This makes sure that
   79 * we have contiguous physical blocks representing the file blocks.
  80 *
   81 * The important thing to note about inode prealloc space is that
   82 * we don't modify any of the values associated with it except
  83 * pa_free.
  84 *
  85 * If we are not able to find blocks in the inode prealloc space and if we
  86 * have the group allocation flag set then we look at the locality group
   87 * prealloc space. This is a per-CPU prealloc list, represented as
  88 *
  89 * ext4_sb_info.s_locality_groups[smp_processor_id()]
  90 *
  91 * The reason for having a per cpu locality group is to reduce the contention
  92 * between CPUs. It is possible to get scheduled at this point.
  93 *
  94 * The locality group prealloc space is used looking at whether we have
  95 * enough free space (pa_free) within the prealloc space.
  96 *
   97 * If we can't allocate blocks via inode prealloc and/or locality group
  98 * prealloc then we look at the buddy cache. The buddy cache is represented
  99 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 100 * mapped to the buddy and bitmap information regarding different
  101 * groups. The buddy information is attached to the buddy cache inode so that
  102 * we can access it through the page cache. The information regarding
  103 * each group is loaded via ext4_mb_load_buddy.  It consists of the
  104 * block bitmap and the buddy information, and is stored in the
 105 * inode as:
 106 *
 107 *  {                        page                        }
 108 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 109 *
 110 *
 111 * one block each for bitmap and buddy information.  So for each group we
 112 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 113 * blocksize) blocks.  So it can have information regarding groups_per_page
  114 * groups, which is blocks_per_page/2.
 115 *
 116 * The buddy cache inode is not stored on disk. The inode is thrown
 117 * away when the filesystem is unmounted.
 118 *
 119 * We look for count number of blocks in the buddy cache. If we were able
 120 * to locate that many free blocks we return with additional information
  121 * regarding the rest of the contiguous physical blocks available.
 122 *
 123 * Before allocating blocks via buddy cache we normalize the request
  124 * blocks. This ensures we ask for more blocks than we need. The extra
  125 * blocks that we get after allocation are added to the respective prealloc
 126 * list. In case of inode preallocation we follow a list of heuristics
 127 * based on file size. This can be found in ext4_mb_normalize_request. If
 128 * we are doing a group prealloc we try to normalize the request to
  129 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 130 * 512 blocks. This can be tuned via
 131 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
  132 * terms of number of blocks. If we have mounted the file system with the -o
  133 * stripe=<value> option, the group prealloc request is normalized to
  134 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 135 * greater than the default mb_group_prealloc.
 136 *
 137 * The regular allocator (using the buddy cache) supports a few tunables.
 138 *
 139 * /sys/fs/ext4/<partition>/mb_min_to_scan
 140 * /sys/fs/ext4/<partition>/mb_max_to_scan
 141 * /sys/fs/ext4/<partition>/mb_order2_req
 142 *
  143 * The regular allocator uses buddy scan only if the request len is a power
  144 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
  145 * value of s_mb_order2_reqs can be tuned via
  146 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
  147 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
  148 * stripe-size chunks. This should result in better allocation on RAID setups.
  149 * If not, we search in the specific group using the bitmap for best extents.
  150 * The tunables min_to_scan and max_to_scan control the behaviour here.
  151 * min_to_scan indicates how long mballoc __must__ look for a best
  152 * extent and max_to_scan indicates how long mballoc __can__ look for a
  153 * best extent among the found extents. Searching for the blocks starts with
  154 * the group specified as the goal value in the allocation context via
  155 * ac_g_ex. Each group is first checked to see whether it
  156 * can be used for allocation. ext4_mb_good_group explains how the groups are
  157 * checked.
 158 *
  159 * Both prealloc spaces are populated as described above. So for the first
  160 * request we will hit the buddy cache, which will result in this prealloc
  161 * space getting filled. The prealloc space is then later used for
  162 * subsequent requests.
 163 */
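/*
 * Illustrative sketch (not part of the allocator): given the layout above,
 * group N's bitmap lives in logical block 2*N of the buddy cache inode and
 * its buddy in block 2*N + 1.  The page and in-page offset holding a given
 * block can be computed as below; this is the same arithmetic that
 * ext4_mb_get_buddy_page_lock() and ext4_mb_load_buddy() use later in this
 * file:
 *
 *	int blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
 *	int block = group * 2;			  /* bitmap block of this group */
 *	pgoff_t pnum = block / blocks_per_page;	  /* page index in buddy cache */
 *	int poff = block % blocks_per_page;	  /* block offset within page   */
 *	void *bitmap = page_address(page) + poff * sb->s_blocksize;
 *
 * With 4k blocks and 4k pages blocks_per_page is 1, so the bitmap and buddy
 * of a group land on two consecutive pages; with 1k blocks they share one
 * page together with the next group.
 */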
 164
 165/*
 166 * mballoc operates on the following data:
 167 *  - on-disk bitmap
 168 *  - in-core buddy (actually includes buddy and bitmap)
 169 *  - preallocation descriptors (PAs)
 170 *
 171 * there are two types of preallocations:
 172 *  - inode
  173 *    assigned to a specific inode and can be used for this inode only.
  174 *    it describes part of the inode's space preallocated to specific
  175 *    physical blocks. any block from that preallocation can be used
  176 *    independently. the descriptor just tracks the number of blocks left
  177 *    unused. so, before taking some block from the descriptor, one must
  178 *    make sure the corresponding logical block isn't allocated yet. this
  179 *    also means that freeing any block within the descriptor's range
 180 *    must discard all preallocated blocks.
 181 *  - locality group
  182 *    assigned to a specific locality group which does not translate to a
  183 *    permanent set of inodes: an inode can join and leave the group. space
 184 *    from this type of preallocation can be used for any inode. thus
 185 *    it's consumed from the beginning to the end.
 186 *
 187 * relation between them can be expressed as:
 188 *    in-core buddy = on-disk bitmap + preallocation descriptors
 189 *
  190 * this means the blocks mballoc considers used are:
 191 *  - allocated blocks (persistent)
 192 *  - preallocated blocks (non-persistent)
 193 *
 194 * consistency in mballoc world means that at any time a block is either
 195 * free or used in ALL structures. notice: "any time" should not be read
 196 * literally -- time is discrete and delimited by locks.
 197 *
  198 *  to keep it simple, we don't use block numbers; instead we count the number of
  199 *  blocks: how many blocks are marked used/free in the on-disk bitmap, buddy and PA.
 200 *
 201 * all operations can be expressed as:
 202 *  - init buddy:			buddy = on-disk + PAs
 203 *  - new PA:				buddy += N; PA = N
 204 *  - use inode PA:			on-disk += N; PA -= N
 205 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 206 *  - use locality group PA		on-disk += N; PA -= N
 207 *  - discard locality group PA		buddy -= PA; PA = 0
 208 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 209 *        is used in real operation because we can't know actual used
 210 *        bits from PA, only from on-disk bitmap
 211 *
 212 * if we follow this strict logic, then all operations above should be atomic.
 213 * given some of them can block, we'd have to use something like semaphores
 214 * killing performance on high-end SMP hardware. let's try to relax it using
 215 * the following knowledge:
 216 *  1) if buddy is referenced, it's already initialized
 217 *  2) while block is used in buddy and the buddy is referenced,
 218 *     nobody can re-allocate that block
 219 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
  220 *     bit set and a PA claims the same block, it's OK. IOW, one can set a bit
  221 *     in the on-disk bitmap if the buddy has the same bit set and/or a PA
  222 *     covers the corresponding block
 223 *
 224 * so, now we're building a concurrency table:
 225 *  - init buddy vs.
 226 *    - new PA
 227 *      blocks for PA are allocated in the buddy, buddy must be referenced
 228 *      until PA is linked to allocation group to avoid concurrent buddy init
 229 *    - use inode PA
 230 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 231 *      given (3) we care that PA-=N operation doesn't interfere with init
 232 *    - discard inode PA
 233 *      the simplest way would be to have buddy initialized by the discard
 234 *    - use locality group PA
 235 *      again PA-=N must be serialized with init
 236 *    - discard locality group PA
 237 *      the simplest way would be to have buddy initialized by the discard
 238 *  - new PA vs.
 239 *    - use inode PA
 240 *      i_data_sem serializes them
 241 *    - discard inode PA
 242 *      discard process must wait until PA isn't used by another process
 243 *    - use locality group PA
 244 *      some mutex should serialize them
 245 *    - discard locality group PA
 246 *      discard process must wait until PA isn't used by another process
 247 *  - use inode PA
 248 *    - use inode PA
  249 *      i_data_sem or another mutex should serialize them
 250 *    - discard inode PA
 251 *      discard process must wait until PA isn't used by another process
 252 *    - use locality group PA
 253 *      nothing wrong here -- they're different PAs covering different blocks
 254 *    - discard locality group PA
 255 *      discard process must wait until PA isn't used by another process
 256 *
  257 * now we're ready to draw a few conclusions:
  258 *  - while a PA is referenced, no discard is possible
  259 *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
  260 *  - a PA changes only after the on-disk bitmap has been updated
 261 *  - discard must not compete with init. either init is done before
 262 *    any discard or they're serialized somehow
 263 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 264 *
  265 * a special case is when we've used a PA down to emptiness. no need to modify
  266 * the buddy in this case, but we should care about concurrent init
 267 *
 268 */
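/*
 * A small worked example of the counting above (in numbers of blocks, as
 * the comment suggests).  Say the on-disk bitmap has 100 bits set and there
 * are no PAs yet:
 *
 *	init buddy:		buddy = 100 + 0 = 100 used
 *	new PA of 16 blocks:	buddy = 116, PA = 16
 *	use 8 blocks from PA:	on-disk = 108, PA = 8, buddy stays 116
 *	discard the PA:		buddy -= 8 (the unused part), PA = 0
 *
 * After every step buddy == on-disk + PA, which is exactly the consistency
 * rule the code below relies on.
 */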
 269
 270 /*
 271 * Logic in few words:
 272 *
 273 *  - allocation:
 274 *    load group
 275 *    find blocks
 276 *    mark bits in on-disk bitmap
 277 *    release group
 278 *
 279 *  - use preallocation:
 280 *    find proper PA (per-inode or group)
 281 *    load group
 282 *    mark bits in on-disk bitmap
 283 *    release group
 284 *    release PA
 285 *
 286 *  - free:
 287 *    load group
 288 *    mark bits in on-disk bitmap
 289 *    release group
 290 *
 291 *  - discard preallocations in group:
 292 *    mark PAs deleted
 293 *    move them onto local list
 294 *    load on-disk bitmap
 295 *    load group
 296 *    remove PA from object (inode or locality group)
 297 *    mark free blocks in-core
 298 *
 299 *  - discard inode's preallocations:
 300 */
 301
 302/*
 303 * Locking rules
 304 *
 305 * Locks:
 306 *  - bitlock on a group	(group)
 307 *  - object (inode/locality)	(object)
 308 *  - per-pa lock		(pa)
 309 *
 310 * Paths:
 311 *  - new pa
 312 *    object
 313 *    group
 314 *
 315 *  - find and use pa:
 316 *    pa
 317 *
 318 *  - release consumed pa:
 319 *    pa
 320 *    group
 321 *    object
 322 *
 323 *  - generate in-core bitmap:
 324 *    group
 325 *        pa
 326 *
 327 *  - discard all for given object (inode, locality group):
 328 *    object
 329 *        pa
 330 *    group
 331 *
 332 *  - discard all for given group:
 333 *    group
 334 *        pa
 335 *    group
 336 *        object
 337 *
 338 */
 339static struct kmem_cache *ext4_pspace_cachep;
 340static struct kmem_cache *ext4_ac_cachep;
 341static struct kmem_cache *ext4_free_ext_cachep;
 342
 343/* We create slab caches for groupinfo data structures based on the
 344 * superblock block size.  There will be one per mounted filesystem for
 345 * each unique s_blocksize_bits */
 346#define NR_GRPINFO_CACHES 8
 347static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 348
 349static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
 350	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
 351	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
 352	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
 353};
 354
 355static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 356					ext4_group_t group);
 357static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 358						ext4_group_t group);
 359static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
 360
 361static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 362{
 363#if BITS_PER_LONG == 64
 364	*bit += ((unsigned long) addr & 7UL) << 3;
 365	addr = (void *) ((unsigned long) addr & ~7UL);
 366#elif BITS_PER_LONG == 32
 367	*bit += ((unsigned long) addr & 3UL) << 3;
 368	addr = (void *) ((unsigned long) addr & ~3UL);
 369#else
 370#error "how many bits you are?!"
 371#endif
 372	return addr;
 373}
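/*
 * Worked example for the helper above, assuming a 64-bit box: with
 * addr = 0x1003 and *bit = 5, (addr & 7) is 3, so *bit becomes
 * 5 + 3 * 8 = 29 and addr is rounded down to 0x1000.  Bit 29 of the
 * aligned word names the same bit as bit 5 of the byte at 0x1003, so the
 * mb_*_bit() wrappers below can take arbitrary byte pointers while the
 * underlying ext4_*_bit() primitives only ever see long-aligned addresses.
 */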
 374
 375static inline int mb_test_bit(int bit, void *addr)
 376{
 377	/*
 378	 * ext4_test_bit on architecture like powerpc
 379	 * needs unsigned long aligned address
 380	 */
 381	addr = mb_correct_addr_and_bit(&bit, addr);
 382	return ext4_test_bit(bit, addr);
 383}
 384
 385static inline void mb_set_bit(int bit, void *addr)
 386{
 387	addr = mb_correct_addr_and_bit(&bit, addr);
 388	ext4_set_bit(bit, addr);
 389}
 390
 391static inline void mb_clear_bit(int bit, void *addr)
 392{
 393	addr = mb_correct_addr_and_bit(&bit, addr);
 394	ext4_clear_bit(bit, addr);
 395}
 396
 397static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 398{
 399	int fix = 0, ret, tmpmax;
 400	addr = mb_correct_addr_and_bit(&fix, addr);
 401	tmpmax = max + fix;
 402	start += fix;
 403
 404	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
 405	if (ret > max)
 406		return max;
 407	return ret;
 408}
 409
 410static inline int mb_find_next_bit(void *addr, int max, int start)
 411{
 412	int fix = 0, ret, tmpmax;
 413	addr = mb_correct_addr_and_bit(&fix, addr);
 414	tmpmax = max + fix;
 415	start += fix;
 416
 417	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
 418	if (ret > max)
 419		return max;
 420	return ret;
 421}
 422
 423static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
 424{
 425	char *bb;
 426
 427	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
 428	BUG_ON(max == NULL);
 429
 430	if (order > e4b->bd_blkbits + 1) {
 431		*max = 0;
 432		return NULL;
 433	}
 434
 435	/* at order 0 we see each particular block */
 436	if (order == 0) {
 437		*max = 1 << (e4b->bd_blkbits + 3);
 438		return EXT4_MB_BITMAP(e4b);
 439	}
 440
 441	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
 442	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
 443
 444	return bb;
 445}
 446
 447#ifdef DOUBLE_CHECK
 448static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 449			   int first, int count)
 450{
 451	int i;
 452	struct super_block *sb = e4b->bd_sb;
 453
 454	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 455		return;
 456	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
 457	for (i = 0; i < count; i++) {
 458		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 459			ext4_fsblk_t blocknr;
 460
 461			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 462			blocknr += first + i;
 463			ext4_grp_locked_error(sb, e4b->bd_group,
 464					      inode ? inode->i_ino : 0,
 465					      blocknr,
 466					      "freeing block already freed "
 467					      "(bit %u)",
 468					      first + i);
 469		}
 470		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
 471	}
 472}
 473
 474static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
 475{
 476	int i;
 477
 478	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 479		return;
 480	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
 481	for (i = 0; i < count; i++) {
 482		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
 483		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
 484	}
 485}
 486
 487static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 488{
 489	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
 490		unsigned char *b1, *b2;
 491		int i;
 492		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
 493		b2 = (unsigned char *) bitmap;
 494		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 495			if (b1[i] != b2[i]) {
 496				ext4_msg(e4b->bd_sb, KERN_ERR,
 497					 "corruption in group %u "
 498					 "at byte %u(%u): %x in copy != %x "
 499					 "on disk/prealloc",
 500					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
 501				BUG();
 502			}
 503		}
 504	}
 505}
 506
 507#else
 508static inline void mb_free_blocks_double(struct inode *inode,
 509				struct ext4_buddy *e4b, int first, int count)
 510{
 511	return;
 512}
 513static inline void mb_mark_used_double(struct ext4_buddy *e4b,
 514						int first, int count)
 515{
 516	return;
 517}
 518static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 519{
 520	return;
 521}
 522#endif
 523
 524#ifdef AGGRESSIVE_CHECK
 525
 526#define MB_CHECK_ASSERT(assert)						\
 527do {									\
 528	if (!(assert)) {						\
 529		printk(KERN_EMERG					\
 530			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
 531			function, file, line, # assert);		\
 532		BUG();							\
 533	}								\
 534} while (0)
 535
 536static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 537				const char *function, int line)
 538{
 539	struct super_block *sb = e4b->bd_sb;
 540	int order = e4b->bd_blkbits + 1;
 541	int max;
 542	int max2;
 543	int i;
 544	int j;
 545	int k;
 546	int count;
 547	struct ext4_group_info *grp;
 548	int fragments = 0;
 549	int fstart;
 550	struct list_head *cur;
 551	void *buddy;
 552	void *buddy2;
 553
 554	{
 555		static int mb_check_counter;
 556		if (mb_check_counter++ % 100 != 0)
 557			return 0;
 558	}
 559
 560	while (order > 1) {
 561		buddy = mb_find_buddy(e4b, order, &max);
 562		MB_CHECK_ASSERT(buddy);
 563		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
 564		MB_CHECK_ASSERT(buddy2);
 565		MB_CHECK_ASSERT(buddy != buddy2);
 566		MB_CHECK_ASSERT(max * 2 == max2);
 567
 568		count = 0;
 569		for (i = 0; i < max; i++) {
 570
 571			if (mb_test_bit(i, buddy)) {
 572				/* only single bit in buddy2 may be 1 */
 573				if (!mb_test_bit(i << 1, buddy2)) {
 574					MB_CHECK_ASSERT(
 575						mb_test_bit((i<<1)+1, buddy2));
 576				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
 577					MB_CHECK_ASSERT(
 578						mb_test_bit(i << 1, buddy2));
 579				}
 580				continue;
 581			}
 582
 583			/* both bits in buddy2 must be 0 */
 584			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
 585			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
 586
 587			for (j = 0; j < (1 << order); j++) {
 588				k = (i * (1 << order)) + j;
 589				MB_CHECK_ASSERT(
 590					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
 591			}
 592			count++;
 593		}
 594		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
 595		order--;
 596	}
 597
 598	fstart = -1;
 599	buddy = mb_find_buddy(e4b, 0, &max);
 600	for (i = 0; i < max; i++) {
 601		if (!mb_test_bit(i, buddy)) {
 602			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
 603			if (fstart == -1) {
 604				fragments++;
 605				fstart = i;
 606			}
 607			continue;
 608		}
 609		fstart = -1;
 610		/* check used bits only */
 611		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
 612			buddy2 = mb_find_buddy(e4b, j, &max2);
 613			k = i >> j;
 614			MB_CHECK_ASSERT(k < max2);
 615			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
 616		}
 617	}
 618	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
 619	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 620
 621	grp = ext4_get_group_info(sb, e4b->bd_group);
 622	list_for_each(cur, &grp->bb_prealloc_list) {
 623		ext4_group_t groupnr;
 624		struct ext4_prealloc_space *pa;
 625		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
 626		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
 627		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
 628		for (i = 0; i < pa->pa_len; i++)
 629			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
 630	}
 631	return 0;
 632}
 633#undef MB_CHECK_ASSERT
 634#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
 635					__FILE__, __func__, __LINE__)
 636#else
 637#define mb_check_buddy(e4b)
 638#endif
 639
 640/*
  641 * Divide the blocks starting at @first with length @len into
  642 * smaller chunks with power-of-2 sizes.
  643 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
  644 * then increase bb_counters[] for the corresponding chunk size.
 645 */
 646static void ext4_mb_mark_free_simple(struct super_block *sb,
 647				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
 648					struct ext4_group_info *grp)
 649{
 650	struct ext4_sb_info *sbi = EXT4_SB(sb);
 651	ext4_grpblk_t min;
 652	ext4_grpblk_t max;
 653	ext4_grpblk_t chunk;
 654	unsigned short border;
 655
 656	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
 657
 658	border = 2 << sb->s_blocksize_bits;
 659
 660	while (len > 0) {
 661		/* find how many blocks can be covered since this position */
 662		max = ffs(first | border) - 1;
 663
 664		/* find how many blocks of power 2 we need to mark */
 665		min = fls(len) - 1;
 666
 667		if (max < min)
 668			min = max;
 669		chunk = 1 << min;
 670
 671		/* mark multiblock chunks only */
 672		grp->bb_counters[min]++;
 673		if (min > 0)
 674			mb_clear_bit(first >> min,
 675				     buddy + sbi->s_mb_offsets[min]);
 676
 677		len -= chunk;
 678		first += chunk;
 679	}
 680}
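/*
 * Worked example for ext4_mb_mark_free_simple(): a free run of 6 blocks
 * starting at block 6 is carved into power-of-2 chunks aligned to their
 * size.  On the first pass max = ffs(6 | border) - 1 = 1 and
 * min = fls(6) - 1 = 2, so min is capped to 1: an order-1 chunk (blocks
 * 6-7) is recorded in bb_counters[1] and bit 3 of the order-1 buddy map is
 * cleared.  The second pass takes blocks 8-11 as one order-2 chunk
 * (bb_counters[2]++, bit 2 of the order-2 map cleared), and len reaches 0.
 */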
 681
 682/*
 683 * Cache the order of the largest free extent we have available in this block
 684 * group.
 685 */
 686static void
 687mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
 688{
 
 689	int i;
 690	int bits;
 691
 692	grp->bb_largest_free_order = -1; /* uninit */
 693
 694	bits = sb->s_blocksize_bits + 1;
 695	for (i = bits; i >= 0; i--) {
 696		if (grp->bb_counters[i] > 0) {
 697			grp->bb_largest_free_order = i;
 698			break;
 699		}
 700	}
 701}
 702
 703static noinline_for_stack
 704void ext4_mb_generate_buddy(struct super_block *sb,
 705				void *buddy, void *bitmap, ext4_group_t group)
 706{
 707	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 708	ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb);
 
 709	ext4_grpblk_t i = 0;
 710	ext4_grpblk_t first;
 711	ext4_grpblk_t len;
 712	unsigned free = 0;
 713	unsigned fragments = 0;
 714	unsigned long long period = get_cycles();
 715
 716	/* initialize buddy from bitmap which is aggregation
 717	 * of on-disk bitmap and preallocations */
 718	i = mb_find_next_zero_bit(bitmap, max, 0);
 719	grp->bb_first_free = i;
 720	while (i < max) {
 721		fragments++;
 722		first = i;
 723		i = mb_find_next_bit(bitmap, max, i);
 724		len = i - first;
 725		free += len;
 726		if (len > 1)
 727			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
 728		else
 729			grp->bb_counters[0]++;
 730		if (i < max)
 731			i = mb_find_next_zero_bit(bitmap, max, i);
 732	}
 733	grp->bb_fragments = fragments;
 734
 735	if (free != grp->bb_free) {
 736		ext4_grp_locked_error(sb, group, 0, 0,
 737				      "%u blocks in bitmap, %u in gd",
 738				      free, grp->bb_free);
 739		/*
  740		 * If we intend to continue, we consider the group descriptor
  741		 * corrupt and update bb_free using the bitmap value
 742		 */
 743		grp->bb_free = free;
 744	}
 745	mb_set_largest_free_order(sb, grp);
 
 746
 747	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
 748
 749	period = get_cycles() - period;
 750	spin_lock(&EXT4_SB(sb)->s_bal_lock);
 751	EXT4_SB(sb)->s_mb_buddies_generated++;
 752	EXT4_SB(sb)->s_mb_generation_time += period;
 753	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
 754}
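/*
 * Worked example for ext4_mb_generate_buddy(): if the aggregated bitmap
 * (on-disk bitmap + preallocations) has only blocks 6-11 and 20 free, the
 * scan finds two fragments.  The 6-block run is handed to
 * ext4_mb_mark_free_simple() (bb_counters[1] and bb_counters[2] gain one
 * each, as in the example above), the single block bumps bb_counters[0],
 * bb_first_free becomes 6, bb_free becomes 7, bb_fragments becomes 2 and
 * mb_set_largest_free_order() caches 2 as the largest free order.
 */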
 755
  756/* The buddy information is attached to the buddy cache inode
  757 * for convenience. The information regarding each group
  758 * is loaded via ext4_mb_load_buddy. It consists of the
  759 * block bitmap and the buddy information, and is
  760 * stored in the inode as
 761 *
 762 * {                        page                        }
 763 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 764 *
 765 *
 766 * one block each for bitmap and buddy information.
 767 * So for each group we take up 2 blocks. A page can
 768 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
  769 * So it can have information regarding groups_per_page groups,
  770 * which is blocks_per_page/2.
 771 *
 772 * Locking note:  This routine takes the block group lock of all groups
 773 * for this page; do not hold this lock when calling this routine!
 774 */
 775
 776static int ext4_mb_init_cache(struct page *page, char *incore)
 777{
 778	ext4_group_t ngroups;
 779	int blocksize;
 780	int blocks_per_page;
 781	int groups_per_page;
 782	int err = 0;
 783	int i;
 784	ext4_group_t first_group;
 785	int first_block;
 786	struct super_block *sb;
 787	struct buffer_head *bhs;
 788	struct buffer_head **bh;
 789	struct inode *inode;
 790	char *data;
 791	char *bitmap;
 792	struct ext4_group_info *grinfo;
 793
 794	mb_debug(1, "init page %lu\n", page->index);
 795
 796	inode = page->mapping->host;
 797	sb = inode->i_sb;
 798	ngroups = ext4_get_groups_count(sb);
 799	blocksize = 1 << inode->i_blkbits;
 800	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
 801
 802	groups_per_page = blocks_per_page >> 1;
 803	if (groups_per_page == 0)
 804		groups_per_page = 1;
 805
 806	/* allocate buffer_heads to read bitmaps */
 807	if (groups_per_page > 1) {
 808		err = -ENOMEM;
 809		i = sizeof(struct buffer_head *) * groups_per_page;
 810		bh = kzalloc(i, GFP_NOFS);
 811		if (bh == NULL)
 812			goto out;
 813	} else
 814		bh = &bhs;
 815
 816	first_group = page->index * blocks_per_page / 2;
 817
 818	/* read all groups the page covers into the cache */
 819	for (i = 0; i < groups_per_page; i++) {
 820		struct ext4_group_desc *desc;
 821
 822		if (first_group + i >= ngroups)
 823			break;
 824
 825		grinfo = ext4_get_group_info(sb, first_group + i);
 826		/*
 827		 * If page is uptodate then we came here after online resize
 828		 * which added some new uninitialized group info structs, so
 829		 * we must skip all initialized uptodate buddies on the page,
 830		 * which may be currently in use by an allocating task.
 831		 */
 832		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
 833			bh[i] = NULL;
 834			continue;
 835		}
 836
 837		err = -EIO;
 838		desc = ext4_get_group_desc(sb, first_group + i, NULL);
 839		if (desc == NULL)
 840			goto out;
 841
 842		err = -ENOMEM;
 843		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
 844		if (bh[i] == NULL)
 845			goto out;
 846
 847		if (bitmap_uptodate(bh[i]))
 848			continue;
 849
 850		lock_buffer(bh[i]);
 851		if (bitmap_uptodate(bh[i])) {
 852			unlock_buffer(bh[i]);
 853			continue;
 854		}
 855		ext4_lock_group(sb, first_group + i);
 856		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 857			ext4_init_block_bitmap(sb, bh[i],
 858						first_group + i, desc);
 859			set_bitmap_uptodate(bh[i]);
 860			set_buffer_uptodate(bh[i]);
 861			ext4_unlock_group(sb, first_group + i);
 862			unlock_buffer(bh[i]);
 863			continue;
 864		}
 865		ext4_unlock_group(sb, first_group + i);
 866		if (buffer_uptodate(bh[i])) {
 867			/*
  868			 * if not uninit, and the bh is uptodate,
  869			 * the bitmap is also uptodate
 870			 */
 871			set_bitmap_uptodate(bh[i]);
 872			unlock_buffer(bh[i]);
 873			continue;
 874		}
 875		get_bh(bh[i]);
 876		/*
 877		 * submit the buffer_head for read. We can
 878		 * safely mark the bitmap as uptodate now.
 879		 * We do it here so the bitmap uptodate bit
 880		 * get set with buffer lock held.
 881		 */
 882		set_bitmap_uptodate(bh[i]);
 883		bh[i]->b_end_io = end_buffer_read_sync;
 884		submit_bh(READ, bh[i]);
 885		mb_debug(1, "read bitmap for group %u\n", first_group + i);
 886	}
 887
 888	/* wait for I/O completion */
 889	for (i = 0; i < groups_per_page; i++)
 890		if (bh[i])
 891			wait_on_buffer(bh[i]);
 892
 893	err = -EIO;
 894	for (i = 0; i < groups_per_page; i++)
 895		if (bh[i] && !buffer_uptodate(bh[i]))
 896			goto out;
 897
 898	err = 0;
 899	first_block = page->index * blocks_per_page;
 900	for (i = 0; i < blocks_per_page; i++) {
 901		int group;
 902
 903		group = (first_block + i) >> 1;
 904		if (group >= ngroups)
 905			break;
 906
 907		if (!bh[group - first_group])
 908			/* skip initialized uptodate buddy */
 909			continue;
 910
 911		/*
  912		 * data carries information regarding this
 913		 * particular group in the format specified
 914		 * above
 915		 *
 916		 */
 917		data = page_address(page) + (i * blocksize);
 918		bitmap = bh[group - first_group]->b_data;
 919
 920		/*
 921		 * We place the buddy block and bitmap block
 922		 * close together
 923		 */
 924		if ((first_block + i) & 1) {
 925			/* this is block of buddy */
 926			BUG_ON(incore == NULL);
 927			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
 928				group, page->index, i * blocksize);
 929			trace_ext4_mb_buddy_bitmap_load(sb, group);
 930			grinfo = ext4_get_group_info(sb, group);
 931			grinfo->bb_fragments = 0;
 932			memset(grinfo->bb_counters, 0,
 933			       sizeof(*grinfo->bb_counters) *
 934				(sb->s_blocksize_bits+2));
 935			/*
 936			 * incore got set to the group block bitmap below
 937			 */
 938			ext4_lock_group(sb, group);
 939			/* init the buddy */
 940			memset(data, 0xff, blocksize);
 941			ext4_mb_generate_buddy(sb, data, incore, group);
 942			ext4_unlock_group(sb, group);
 943			incore = NULL;
 944		} else {
 945			/* this is block of bitmap */
 946			BUG_ON(incore != NULL);
 947			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
 948				group, page->index, i * blocksize);
 949			trace_ext4_mb_bitmap_load(sb, group);
 950
 951			/* see comments in ext4_mb_put_pa() */
 952			ext4_lock_group(sb, group);
 953			memcpy(data, bitmap, blocksize);
 954
 955			/* mark all preallocated blks used in in-core bitmap */
 956			ext4_mb_generate_from_pa(sb, data, group);
 957			ext4_mb_generate_from_freelist(sb, data, group);
 958			ext4_unlock_group(sb, group);
 959
 960			/* set incore so that the buddy information can be
 961			 * generated using this
 962			 */
 963			incore = data;
 964		}
 965	}
 966	SetPageUptodate(page);
 967
 968out:
 969	if (bh) {
 970		for (i = 0; i < groups_per_page; i++)
 971			brelse(bh[i]);
 972		if (bh != &bhs)
 973			kfree(bh);
 974	}
 975	return err;
 976}
 977
 978/*
  979 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
  980 * on the same buddy page doesn't happen while holding the buddy page lock.
  981 * Return the locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
  982 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
 983 */
 984static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
 985		ext4_group_t group, struct ext4_buddy *e4b)
 986{
 987	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
 988	int block, pnum, poff;
 989	int blocks_per_page;
 990	struct page *page;
 991
 992	e4b->bd_buddy_page = NULL;
 993	e4b->bd_bitmap_page = NULL;
 994
 995	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
 996	/*
 997	 * the buddy cache inode stores the block bitmap
 998	 * and buddy information in consecutive blocks.
 999	 * So for each group we need two blocks.
1000	 */
1001	block = group * 2;
1002	pnum = block / blocks_per_page;
1003	poff = block % blocks_per_page;
1004	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1005	if (!page)
1006		return -EIO;
1007	BUG_ON(page->mapping != inode->i_mapping);
1008	e4b->bd_bitmap_page = page;
1009	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1010
1011	if (blocks_per_page >= 2) {
1012		/* buddy and bitmap are on the same page */
1013		return 0;
1014	}
1015
1016	block++;
1017	pnum = block / blocks_per_page;
1018	poff = block % blocks_per_page;
1019	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1020	if (!page)
1021		return -EIO;
1022	BUG_ON(page->mapping != inode->i_mapping);
1023	e4b->bd_buddy_page = page;
1024	return 0;
1025}
1026
1027static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1028{
1029	if (e4b->bd_bitmap_page) {
1030		unlock_page(e4b->bd_bitmap_page);
1031		page_cache_release(e4b->bd_bitmap_page);
1032	}
1033	if (e4b->bd_buddy_page) {
1034		unlock_page(e4b->bd_buddy_page);
1035		page_cache_release(e4b->bd_buddy_page);
1036	}
1037}
1038
1039/*
1040 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1041 * block group lock of all groups for this page; do not hold the BG lock when
1042 * calling this routine!
1043 */
1044static noinline_for_stack
1045int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1046{
1047
1048	struct ext4_group_info *this_grp;
1049	struct ext4_buddy e4b;
1050	struct page *page;
1051	int ret = 0;
1052
1053	mb_debug(1, "init group %u\n", group);
 
1054	this_grp = ext4_get_group_info(sb, group);
1055	/*
1056	 * This ensures that we don't reinit the buddy cache
 1057	 * page which maps to the group from which we are already
 1058	 * allocating. If we are looking at the buddy cache we would
 1059	 * have taken a reference using ext4_mb_load_buddy and that
 1060	 * would have pinned the buddy page in the page cache.
 1061	 */
1062	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1063	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1064		/*
1065		 * somebody initialized the group
1066		 * return without doing anything
1067		 */
1068		goto err;
1069	}
1070
1071	page = e4b.bd_bitmap_page;
1072	ret = ext4_mb_init_cache(page, NULL);
1073	if (ret)
1074		goto err;
1075	if (!PageUptodate(page)) {
1076		ret = -EIO;
1077		goto err;
1078	}
1079	mark_page_accessed(page);
1080
1081	if (e4b.bd_buddy_page == NULL) {
1082		/*
1083		 * If both the bitmap and buddy are in
1084		 * the same page we don't need to force
1085		 * init the buddy
1086		 */
1087		ret = 0;
1088		goto err;
1089	}
1090	/* init buddy cache */
1091	page = e4b.bd_buddy_page;
1092	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1093	if (ret)
1094		goto err;
1095	if (!PageUptodate(page)) {
1096		ret = -EIO;
1097		goto err;
1098	}
1099	mark_page_accessed(page);
1100err:
1101	ext4_mb_put_buddy_page_lock(&e4b);
1102	return ret;
1103}
1104
1105/*
1106 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1107 * block group lock of all groups for this page; do not hold the BG lock when
1108 * calling this routine!
1109 */
1110static noinline_for_stack int
1111ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1112					struct ext4_buddy *e4b)
1113{
1114	int blocks_per_page;
1115	int block;
1116	int pnum;
1117	int poff;
1118	struct page *page;
1119	int ret;
1120	struct ext4_group_info *grp;
1121	struct ext4_sb_info *sbi = EXT4_SB(sb);
1122	struct inode *inode = sbi->s_buddy_cache;
1123
1124	mb_debug(1, "load group %u\n", group);
 
1125
1126	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1127	grp = ext4_get_group_info(sb, group);
1128
1129	e4b->bd_blkbits = sb->s_blocksize_bits;
1130	e4b->bd_info = grp;
1131	e4b->bd_sb = sb;
1132	e4b->bd_group = group;
1133	e4b->bd_buddy_page = NULL;
1134	e4b->bd_bitmap_page = NULL;
1135
1136	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1137		/*
1138		 * we need full data about the group
1139		 * to make a good selection
1140		 */
1141		ret = ext4_mb_init_group(sb, group);
1142		if (ret)
1143			return ret;
1144	}
1145
1146	/*
1147	 * the buddy cache inode stores the block bitmap
1148	 * and buddy information in consecutive blocks.
1149	 * So for each group we need two blocks.
1150	 */
1151	block = group * 2;
1152	pnum = block / blocks_per_page;
1153	poff = block % blocks_per_page;
1154
 1155	/* we could use find_or_create_page(), but it locks the page,
 1156	 * which we'd like to avoid in the fast path ... */
1157	page = find_get_page(inode->i_mapping, pnum);
1158	if (page == NULL || !PageUptodate(page)) {
1159		if (page)
1160			/*
1161			 * drop the page reference and try
1162			 * to get the page with lock. If we
1163			 * are not uptodate that implies
1164			 * somebody just created the page but
 1165			 * has not yet initialized it. So
1166			 * wait for it to initialize.
1167			 */
1168			page_cache_release(page);
1169		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1170		if (page) {
1171			BUG_ON(page->mapping != inode->i_mapping);
1172			if (!PageUptodate(page)) {
1173				ret = ext4_mb_init_cache(page, NULL);
1174				if (ret) {
1175					unlock_page(page);
1176					goto err;
1177				}
1178				mb_cmp_bitmaps(e4b, page_address(page) +
1179					       (poff * sb->s_blocksize));
1180			}
1181			unlock_page(page);
1182		}
1183	}
1184	if (page == NULL || !PageUptodate(page)) {
1185		ret = -EIO;
1186		goto err;
1187	}
1188	e4b->bd_bitmap_page = page;
1189	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1190	mark_page_accessed(page);
1191
1192	block++;
1193	pnum = block / blocks_per_page;
1194	poff = block % blocks_per_page;
1195
1196	page = find_get_page(inode->i_mapping, pnum);
1197	if (page == NULL || !PageUptodate(page)) {
1198		if (page)
1199			page_cache_release(page);
1200		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1201		if (page) {
1202			BUG_ON(page->mapping != inode->i_mapping);
1203			if (!PageUptodate(page)) {
1204				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
 
1205				if (ret) {
1206					unlock_page(page);
1207					goto err;
1208				}
1209			}
1210			unlock_page(page);
1211		}
1212	}
1213	if (page == NULL || !PageUptodate(page)) {
1214		ret = -EIO;
1215		goto err;
1216	}
1217	e4b->bd_buddy_page = page;
1218	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1219	mark_page_accessed(page);
1220
1221	BUG_ON(e4b->bd_bitmap_page == NULL);
1222	BUG_ON(e4b->bd_buddy_page == NULL);
1223
1224	return 0;
1225
1226err:
1227	if (page)
1228		page_cache_release(page);
1229	if (e4b->bd_bitmap_page)
1230		page_cache_release(e4b->bd_bitmap_page);
1231	if (e4b->bd_buddy_page)
1232		page_cache_release(e4b->bd_buddy_page);
1233	e4b->bd_buddy = NULL;
1234	e4b->bd_bitmap = NULL;
1235	return ret;
1236}
1237
1238static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1239{
1240	if (e4b->bd_bitmap_page)
1241		page_cache_release(e4b->bd_bitmap_page);
1242	if (e4b->bd_buddy_page)
1243		page_cache_release(e4b->bd_buddy_page);
1244}
1245
1246
1247static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1248{
1249	int order = 1;
1250	void *bb;
1251
1252	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1253	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1254
1255	bb = EXT4_MB_BUDDY(e4b);
1256	while (order <= e4b->bd_blkbits + 1) {
1257		block = block >> 1;
1258		if (!mb_test_bit(block, bb)) {
1259			/* this block is part of buddy of order 'order' */
1260			return order;
1261		}
1262		bb += 1 << (e4b->bd_blkbits - order);
1263		order++;
1264	}
1265	return 0;
1266}
1267
1268static void mb_clear_bits(void *bm, int cur, int len)
1269{
1270	__u32 *addr;
1271
1272	len = cur + len;
1273	while (cur < len) {
1274		if ((cur & 31) == 0 && (len - cur) >= 32) {
1275			/* fast path: clear whole word at once */
1276			addr = bm + (cur >> 3);
1277			*addr = 0;
1278			cur += 32;
1279			continue;
1280		}
1281		mb_clear_bit(cur, bm);
1282		cur++;
1283	}
1284}
1285
1286void ext4_set_bits(void *bm, int cur, int len)
1287{
1288	__u32 *addr;
1289
1290	len = cur + len;
1291	while (cur < len) {
1292		if ((cur & 31) == 0 && (len - cur) >= 32) {
1293			/* fast path: set whole word at once */
1294			addr = bm + (cur >> 3);
1295			*addr = 0xffffffff;
1296			cur += 32;
1297			continue;
1298		}
1299		mb_set_bit(cur, bm);
1300		cur++;
1301	}
1302}
1303
1304static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1305			  int first, int count)
1306{
1307	int block = 0;
1308	int max = 0;
1309	int order;
1310	void *buddy;
1311	void *buddy2;
1312	struct super_block *sb = e4b->bd_sb;
1313
1314	BUG_ON(first + count > (sb->s_blocksize << 3));
1315	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1316	mb_check_buddy(e4b);
1317	mb_free_blocks_double(inode, e4b, first, count);
1318
 
1319	e4b->bd_info->bb_free += count;
1320	if (first < e4b->bd_info->bb_first_free)
1321		e4b->bd_info->bb_first_free = first;
1322
1323	/* let's maintain fragments counter */
1324	if (first != 0)
1325		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1326	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1327		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1328	if (block && max)
1329		e4b->bd_info->bb_fragments--;
1330	else if (!block && !max)
1331		e4b->bd_info->bb_fragments++;
1332
1333	/* let's maintain buddy itself */
1334	while (count-- > 0) {
1335		block = first++;
1336		order = 0;
1337
1338		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1339			ext4_fsblk_t blocknr;
1340
1341			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1342			blocknr += block;
1343			ext4_grp_locked_error(sb, e4b->bd_group,
1344					      inode ? inode->i_ino : 0,
1345					      blocknr,
1346					      "freeing already freed block "
1347					      "(bit %u)", block);
1348		}
1349		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1350		e4b->bd_info->bb_counters[order]++;
1351
1352		/* start of the buddy */
1353		buddy = mb_find_buddy(e4b, order, &max);
1354
1355		do {
1356			block &= ~1UL;
1357			if (mb_test_bit(block, buddy) ||
1358					mb_test_bit(block + 1, buddy))
1359				break;
1360
1361			/* both the buddies are free, try to coalesce them */
1362			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1363
1364			if (!buddy2)
1365				break;
1366
1367			if (order > 0) {
1368				/* for special purposes, we don't set
1369				 * free bits in bitmap */
1370				mb_set_bit(block, buddy);
1371				mb_set_bit(block + 1, buddy);
1372			}
1373			e4b->bd_info->bb_counters[order]--;
1374			e4b->bd_info->bb_counters[order]--;
1375
1376			block = block >> 1;
1377			order++;
1378			e4b->bd_info->bb_counters[order]++;
1379
1380			mb_clear_bit(block, buddy2);
1381			buddy = buddy2;
1382		} while (1);
1383	}
1384	mb_set_largest_free_order(sb, e4b->bd_info);
 
1385	mb_check_buddy(e4b);
1386}
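/*
 * Worked example for the coalescing loop in mb_free_blocks(): suppose block
 * 8 is already free (order 0), blocks 10-11 are free as one order-1 chunk,
 * and block 9 is now freed.  Bit 9 is cleared in the in-core bitmap and
 * bb_counters[0]++, then the loop sees that both 8 and 9 are free, merges
 * them into an order-1 chunk, sees that its neighbour (10-11) is free too,
 * and merges again into one order-2 chunk covering 8-11.  Net effect on the
 * counters: bb_counters[0] and bb_counters[1] each drop back to 0 and
 * bb_counters[2] gains one, assuming blocks 12-15 are still in use.
 */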
1387
1388static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1389				int needed, struct ext4_free_extent *ex)
1390{
1391	int next = block;
1392	int max;
1393	int ord;
1394	void *buddy;
1395
1396	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1397	BUG_ON(ex == NULL);
1398
1399	buddy = mb_find_buddy(e4b, order, &max);
1400	BUG_ON(buddy == NULL);
1401	BUG_ON(block >= max);
1402	if (mb_test_bit(block, buddy)) {
1403		ex->fe_len = 0;
1404		ex->fe_start = 0;
1405		ex->fe_group = 0;
1406		return 0;
1407	}
1408
 1409	/* FIXME: drop order completely? */
1410	if (likely(order == 0)) {
1411		/* find actual order */
1412		order = mb_find_order_for_block(e4b, block);
1413		block = block >> order;
1414	}
1415
1416	ex->fe_len = 1 << order;
1417	ex->fe_start = block << order;
1418	ex->fe_group = e4b->bd_group;
1419
1420	/* calc difference from given start */
1421	next = next - ex->fe_start;
1422	ex->fe_len -= next;
1423	ex->fe_start += next;
1424
1425	while (needed > ex->fe_len &&
1426	       (buddy = mb_find_buddy(e4b, order, &max))) {
1427
1428		if (block + 1 >= max)
1429			break;
1430
1431		next = (block + 1) * (1 << order);
1432		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1433			break;
1434
1435		ord = mb_find_order_for_block(e4b, next);
1436
1437		order = ord;
1438		block = next >> order;
1439		ex->fe_len += 1 << order;
1440	}
1441
1442	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1443	return ex->fe_len;
1444}
1445
1446static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1447{
1448	int ord;
1449	int mlen = 0;
1450	int max = 0;
1451	int cur;
1452	int start = ex->fe_start;
1453	int len = ex->fe_len;
1454	unsigned ret = 0;
1455	int len0 = len;
1456	void *buddy;
 
1457
1458	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1459	BUG_ON(e4b->bd_group != ex->fe_group);
1460	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1461	mb_check_buddy(e4b);
1462	mb_mark_used_double(e4b, start, len);
1463
 
1464	e4b->bd_info->bb_free -= len;
1465	if (e4b->bd_info->bb_first_free == start)
1466		e4b->bd_info->bb_first_free += len;
1467
1468	/* let's maintain fragments counter */
1469	if (start != 0)
1470		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1471	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1472		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1473	if (mlen && max)
1474		e4b->bd_info->bb_fragments++;
1475	else if (!mlen && !max)
1476		e4b->bd_info->bb_fragments--;
1477
1478	/* let's maintain buddy itself */
1479	while (len) {
1480		ord = mb_find_order_for_block(e4b, start);
 
1481
1482		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1483			/* the whole chunk may be allocated at once! */
1484			mlen = 1 << ord;
1485			buddy = mb_find_buddy(e4b, ord, &max);
1486			BUG_ON((start >> ord) >= max);
1487			mb_set_bit(start >> ord, buddy);
1488			e4b->bd_info->bb_counters[ord]--;
1489			start += mlen;
1490			len -= mlen;
1491			BUG_ON(len < 0);
1492			continue;
1493		}
1494
1495		/* store for history */
1496		if (ret == 0)
1497			ret = len | (ord << 16);
1498
1499		/* we have to split large buddy */
1500		BUG_ON(ord <= 0);
1501		buddy = mb_find_buddy(e4b, ord, &max);
1502		mb_set_bit(start >> ord, buddy);
1503		e4b->bd_info->bb_counters[ord]--;
1504
1505		ord--;
1506		cur = (start >> ord) & ~1U;
1507		buddy = mb_find_buddy(e4b, ord, &max);
1508		mb_clear_bit(cur, buddy);
1509		mb_clear_bit(cur + 1, buddy);
1510		e4b->bd_info->bb_counters[ord]++;
1511		e4b->bd_info->bb_counters[ord]++;
 
1512	}
1513	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1514
1515	ext4_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
 
1516	mb_check_buddy(e4b);
1517
1518	return ret;
1519}
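/*
 * Worked example for the splitting loop in mb_mark_used(): allocating 3
 * blocks at block 8 out of a free order-3 chunk covering 8-15 first marks
 * the order-3 chunk used, splitting it into two order-2 halves; the half
 * holding the allocation (8-11) is split again into two order-1 halves,
 * then 8-9 is taken whole and 10-11 is split once more so that block 10
 * can be taken and block 11 stays free.  The result: blocks 8-10 are set
 * in the in-core bitmap, block 11 remains free at order 0, blocks 12-15
 * remain free as one order-2 chunk, and bb_counters[] is adjusted at each
 * step.  The returned value packs the remaining length and the order of
 * the first split for the allocation statistics.
 */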
1520
1521/*
1522 * Must be called under group lock!
1523 */
1524static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1525					struct ext4_buddy *e4b)
1526{
1527	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1528	int ret;
1529
1530	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1531	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1532
1533	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1534	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1535	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1536
1537	/* preallocation can change ac_b_ex, thus we store actually
1538	 * allocated blocks for history */
1539	ac->ac_f_ex = ac->ac_b_ex;
1540
1541	ac->ac_status = AC_STATUS_FOUND;
1542	ac->ac_tail = ret & 0xffff;
1543	ac->ac_buddy = ret >> 16;
1544
1545	/*
1546	 * take the page reference. We want the page to be pinned
 1547	 * so that we don't get an ext4_mb_init_cache() call for this
1548	 * group until we update the bitmap. That would mean we
1549	 * double allocate blocks. The reference is dropped
1550	 * in ext4_mb_release_context
1551	 */
1552	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1553	get_page(ac->ac_bitmap_page);
1554	ac->ac_buddy_page = e4b->bd_buddy_page;
1555	get_page(ac->ac_buddy_page);
1556	/* store last allocated for subsequent stream allocation */
1557	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1558		spin_lock(&sbi->s_md_lock);
1559		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1560		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1561		spin_unlock(&sbi->s_md_lock);
1562	}
1563}
1564
1565/*
1566 * regular allocator, for general purposes allocation
1567 */
1568
1569static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1570					struct ext4_buddy *e4b,
1571					int finish_group)
1572{
1573	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1574	struct ext4_free_extent *bex = &ac->ac_b_ex;
1575	struct ext4_free_extent *gex = &ac->ac_g_ex;
1576	struct ext4_free_extent ex;
1577	int max;
1578
1579	if (ac->ac_status == AC_STATUS_FOUND)
1580		return;
1581	/*
1582	 * We don't want to scan for a whole year
1583	 */
1584	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1585			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1586		ac->ac_status = AC_STATUS_BREAK;
1587		return;
1588	}
1589
1590	/*
1591	 * Haven't found good chunk so far, let's continue
1592	 */
1593	if (bex->fe_len < gex->fe_len)
1594		return;
1595
1596	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1597			&& bex->fe_group == e4b->bd_group) {
1598		/* recheck chunk's availability - we don't know
1599		 * when it was found (within this lock-unlock
1600		 * period or not) */
1601		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1602		if (max >= gex->fe_len) {
1603			ext4_mb_use_best_found(ac, e4b);
1604			return;
1605		}
1606	}
1607}
1608
1609/*
 1610 * The routine checks whether the found extent is good enough. If it is,
 1611 * then the extent gets marked used and a flag is set in the context
 1612 * to stop scanning. Otherwise, the extent is compared with the
 1613 * previously found extent and, if the new one is better, it's stored
 1614 * in the context. Later, the best found extent will be used if
 1615 * mballoc can't find a good enough extent.
1616 *
1617 * FIXME: real allocation policy is to be designed yet!
1618 */
1619static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1620					struct ext4_free_extent *ex,
1621					struct ext4_buddy *e4b)
1622{
1623	struct ext4_free_extent *bex = &ac->ac_b_ex;
1624	struct ext4_free_extent *gex = &ac->ac_g_ex;
1625
1626	BUG_ON(ex->fe_len <= 0);
1627	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1628	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1629	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1630
1631	ac->ac_found++;
1632
1633	/*
1634	 * The special case - take what you catch first
1635	 */
1636	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1637		*bex = *ex;
1638		ext4_mb_use_best_found(ac, e4b);
1639		return;
1640	}
1641
1642	/*
 1643	 * Let's check whether the chunk is good enough
1644	 */
1645	if (ex->fe_len == gex->fe_len) {
1646		*bex = *ex;
1647		ext4_mb_use_best_found(ac, e4b);
1648		return;
1649	}
1650
1651	/*
1652	 * If this is first found extent, just store it in the context
1653	 */
1654	if (bex->fe_len == 0) {
1655		*bex = *ex;
1656		return;
1657	}
1658
1659	/*
1660	 * If new found extent is better, store it in the context
1661	 */
1662	if (bex->fe_len < gex->fe_len) {
1663		/* if the request isn't satisfied, any found extent
1664		 * larger than previous best one is better */
1665		if (ex->fe_len > bex->fe_len)
1666			*bex = *ex;
1667	} else if (ex->fe_len > gex->fe_len) {
1668		/* if the request is satisfied, then we try to find
 1669		 * an extent that still satisfies the request, but is
 1670		 * smaller than the previous one */
1671		if (ex->fe_len < bex->fe_len)
1672			*bex = *ex;
1673	}
1674
1675	ext4_mb_check_limits(ac, e4b, 0);
1676}
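/*
 * Worked example of the policy above with a goal length of 16 blocks: a
 * first found extent of 8 is stored as the best so far; a later extent of
 * 24 replaces it (the goal is not yet satisfied, bigger is better); an
 * extent of 20 replaces that one (the goal is satisfied, so the smaller of
 * the two satisfying extents wins); and an extent of exactly 16 is used
 * immediately without further scanning, subject to the min/max_to_scan
 * limits checked by ext4_mb_check_limits().
 */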
1677
1678static noinline_for_stack
1679int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1680					struct ext4_buddy *e4b)
1681{
1682	struct ext4_free_extent ex = ac->ac_b_ex;
1683	ext4_group_t group = ex.fe_group;
1684	int max;
1685	int err;
1686
1687	BUG_ON(ex.fe_len <= 0);
1688	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1689	if (err)
1690		return err;
1691
1692	ext4_lock_group(ac->ac_sb, group);
1693	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1694
1695	if (max > 0) {
1696		ac->ac_b_ex = ex;
1697		ext4_mb_use_best_found(ac, e4b);
1698	}
1699
1700	ext4_unlock_group(ac->ac_sb, group);
1701	ext4_mb_unload_buddy(e4b);
1702
1703	return 0;
1704}
1705
1706static noinline_for_stack
1707int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1708				struct ext4_buddy *e4b)
1709{
1710	ext4_group_t group = ac->ac_g_ex.fe_group;
1711	int max;
1712	int err;
1713	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 
1714	struct ext4_free_extent ex;
1715
1716	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1717		return 0;
1718
1719	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1720	if (err)
1721		return err;
1722
1723	ext4_lock_group(ac->ac_sb, group);
1724	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1725			     ac->ac_g_ex.fe_len, &ex);
 
1726
1727	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1728		ext4_fsblk_t start;
1729
1730		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1731			ex.fe_start;
1732		/* use do_div to get remainder (would be 64-bit modulo) */
1733		if (do_div(start, sbi->s_stripe) == 0) {
1734			ac->ac_found++;
1735			ac->ac_b_ex = ex;
1736			ext4_mb_use_best_found(ac, e4b);
1737		}
1738	} else if (max >= ac->ac_g_ex.fe_len) {
1739		BUG_ON(ex.fe_len <= 0);
1740		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1741		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1742		ac->ac_found++;
1743		ac->ac_b_ex = ex;
1744		ext4_mb_use_best_found(ac, e4b);
1745	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1746		/* Sometimes, the caller may want to merge even a small
1747		 * number of blocks into an existing extent */
1748		BUG_ON(ex.fe_len <= 0);
1749		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1750		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1751		ac->ac_found++;
1752		ac->ac_b_ex = ex;
1753		ext4_mb_use_best_found(ac, e4b);
1754	}
1755	ext4_unlock_group(ac->ac_sb, group);
1756	ext4_mb_unload_buddy(e4b);
1757
1758	return 0;
1759}
1760
1761/*
1762 * The routine scans buddy structures (not the bitmap!) from the given order
1763 * up to the max order and tries to find a big enough chunk to satisfy the request
1764 */
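/*
 * Example: for a 16-block (2^4) request ac_2order is 4, so the loop below
 * starts at order 4 and walks up until it finds an order i with
 * grp->bb_counters[i] != 0; the first zero bit k in that order's buddy
 * bitmap describes a free 2^i chunk starting at block k << i within the
 * group.
 */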
1765static noinline_for_stack
1766void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1767					struct ext4_buddy *e4b)
1768{
1769	struct super_block *sb = ac->ac_sb;
1770	struct ext4_group_info *grp = e4b->bd_info;
1771	void *buddy;
1772	int i;
1773	int k;
1774	int max;
1775
1776	BUG_ON(ac->ac_2order <= 0);
1777	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1778		if (grp->bb_counters[i] == 0)
1779			continue;
1780
1781		buddy = mb_find_buddy(e4b, i, &max);
1782		BUG_ON(buddy == NULL);
1783
1784		k = mb_find_next_zero_bit(buddy, max, 0);
1785		BUG_ON(k >= max);
1786
1787		ac->ac_found++;
1788
1789		ac->ac_b_ex.fe_len = 1 << i;
1790		ac->ac_b_ex.fe_start = k << i;
1791		ac->ac_b_ex.fe_group = e4b->bd_group;
1792
1793		ext4_mb_use_best_found(ac, e4b);
1794
1795		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1796
1797		if (EXT4_SB(sb)->s_mb_stats)
1798			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1799
1800		break;
1801	}
1802}
1803
1804/*
1805 * The routine scans the group and measures all found extents.
1806 * In order to optimize scanning, the caller must pass the number of
1807 * free blocks in the group, so the routine knows the upper limit.
1808 */
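/*
 * The scan starts at bd_info->bb_first_free and repeatedly looks for the
 * next zero (free) bit in the block bitmap, measures the free extent found
 * there against the goal length, and feeds it to ext4_mb_measure_extent().
 * The running 'free' counter both bounds the loop and lets us detect a
 * corrupted bitmap that disagrees with the group info.
 */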
1809static noinline_for_stack
1810void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1811					struct ext4_buddy *e4b)
1812{
1813	struct super_block *sb = ac->ac_sb;
1814	void *bitmap = EXT4_MB_BITMAP(e4b);
1815	struct ext4_free_extent ex;
1816	int i;
1817	int free;
1818
1819	free = e4b->bd_info->bb_free;
1820	BUG_ON(free <= 0);
 
1821
1822	i = e4b->bd_info->bb_first_free;
1823
1824	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1825		i = mb_find_next_zero_bit(bitmap,
1826						EXT4_BLOCKS_PER_GROUP(sb), i);
1827		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1828			/*
1829			 * If we have a corrupt bitmap, we won't find any
1830			 * free blocks even though the group info says we
1831			 * have free blocks
1832			 */
1833			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1834					"%d free blocks as per "
1835					"group info. But bitmap says 0",
1836					free);
1837			break;
1838		}
1839
1840		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1841		BUG_ON(ex.fe_len <= 0);
 
1842		if (free < ex.fe_len) {
1843			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1844					"%d free blocks as per "
1845					"group info. But got %d blocks",
1846					free, ex.fe_len);
1847			/*
1848			 * The number of free blocks differs. This mostly
1849			 * indicates that the bitmap is corrupt. So exit
1850			 * without claiming the space.
1851			 */
1852			break;
1853		}
1854
1855		ext4_mb_measure_extent(ac, &ex, e4b);
1856
1857		i += ex.fe_len;
1858		free -= ex.fe_len;
1859	}
1860
1861	ext4_mb_check_limits(ac, e4b, 1);
1862}
1863
1864/*
1865 * This is a special case for storage like RAID5:
1866 * we try to find stripe-aligned chunks for stripe-size-multiple requests
1867 */
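/*
 * Example of the alignment arithmetic below: if the group starts at block
 * 1000 and s_stripe is 16, then a = (1000 + 15) / 16 = 63, so the first
 * stripe-aligned block is 63 * 16 = 1008, i.e. offset i = 8 within the
 * group; the scan then advances in s_stripe steps.
 */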
1868static noinline_for_stack
1869void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1870				 struct ext4_buddy *e4b)
1871{
1872	struct super_block *sb = ac->ac_sb;
1873	struct ext4_sb_info *sbi = EXT4_SB(sb);
1874	void *bitmap = EXT4_MB_BITMAP(e4b);
1875	struct ext4_free_extent ex;
1876	ext4_fsblk_t first_group_block;
1877	ext4_fsblk_t a;
1878	ext4_grpblk_t i;
1879	int max;
1880
1881	BUG_ON(sbi->s_stripe == 0);
1882
1883	/* find first stripe-aligned block in group */
1884	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1885
1886	a = first_group_block + sbi->s_stripe - 1;
1887	do_div(a, sbi->s_stripe);
1888	i = (a * sbi->s_stripe) - first_group_block;
1889
1890	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1891		if (!mb_test_bit(i, bitmap)) {
1892			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1893			if (max >= sbi->s_stripe) {
1894				ac->ac_found++;
 
1895				ac->ac_b_ex = ex;
1896				ext4_mb_use_best_found(ac, e4b);
1897				break;
1898			}
1899		}
1900		i += sbi->s_stripe;
1901	}
1902}
1903
1904/* This is now called BEFORE we load the buddy bitmap. */
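/*
 * Summary of the criteria (cr) levels handled below:
 *   cr 0 - the group's largest free chunk order must be at least
 *          ac_2order (and the first group of a large enough flexgroup
 *          is avoided for data files);
 *   cr 1 - the average free extent size (free / fragments) must reach
 *          the goal length;
 *   cr 2 - the total free block count must reach the goal length;
 *   cr 3 - any group with free blocks will do.
 */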
1905static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1906				ext4_group_t group, int cr)
1907{
1908	unsigned free, fragments;
1909	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1910	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1911
1912	BUG_ON(cr < 0 || cr >= 4);
1913
1914	/* We only do this if the grp has never been initialized */
1915	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1916		int ret = ext4_mb_init_group(ac->ac_sb, group);
1917		if (ret)
1918			return 0;
1919	}
1920
1921	free = grp->bb_free;
1922	fragments = grp->bb_fragments;
1923	if (free == 0)
1924		return 0;
1925	if (fragments == 0)
1926		return 0;
1927
1928	switch (cr) {
1929	case 0:
1930		BUG_ON(ac->ac_2order == 0);
1931
1932		if (grp->bb_largest_free_order < ac->ac_2order)
1933			return 0;
1934
1935		/* Avoid using the first bg of a flexgroup for data files */
1936		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1937		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1938		    ((group % flex_size) == 0))
1939			return 0;
1940
1941		return 1;
1942	case 1:
1943		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1944			return 1;
1945		break;
1946	case 2:
1947		if (free >= ac->ac_g_ex.fe_len)
1948			return 1;
1949		break;
1950	case 3:
1951		return 1;
1952	default:
1953		BUG();
1954	}
1955
1956	return 0;
1957}
1958
1959static noinline_for_stack int
1960ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1961{
1962	ext4_group_t ngroups, group, i;
1963	int cr;
1964	int err = 0;
 
1965	struct ext4_sb_info *sbi;
1966	struct super_block *sb;
1967	struct ext4_buddy e4b;
 
1968
1969	sb = ac->ac_sb;
1970	sbi = EXT4_SB(sb);
1971	ngroups = ext4_get_groups_count(sb);
1972	/* non-extent files are limited to low blocks/groups */
1973	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1974		ngroups = sbi->s_blockfile_groups;
1975
1976	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1977
1978	/* first, try the goal */
1979	err = ext4_mb_find_by_goal(ac, &e4b);
1980	if (err || ac->ac_status == AC_STATUS_FOUND)
1981		goto out;
1982
1983	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1984		goto out;
1985
1986	/*
1987	 * ac->ac_2order is set only if the fe_len is a power of 2;
1988	 * if ac_2order is set we also set the criteria to 0 so that we
1989	 * try exact allocation using the buddy.
1990	 */
1991	i = fls(ac->ac_g_ex.fe_len);
1992	ac->ac_2order = 0;
1993	/*
1994	 * We search using buddy data only if the order of the request
1995	 * is greater than or equal to sbi->s_mb_order2_reqs.
1996	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1997	 */
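	/*
	 * Example: for fe_len = 16 (and assuming the request passes the
	 * mb_order2_req threshold), fls() returns 5 and 16 & ~(1 << 4) == 0,
	 * so ac_2order becomes 4; for fe_len = 24 the test fails and
	 * ac_2order stays 0, so the exact buddy scan (cr == 0) is skipped.
	 */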
1998	if (i >= sbi->s_mb_order2_reqs) {
1999		/*
2000		 * This should tell if fe_len is exactly a power of 2
2001		 */
2002		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2003			ac->ac_2order = i - 1;
 
2004	}
2005
2006	/* if stream allocation is enabled, use global goal */
2007	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2008		/* TBD: may be hot point */
2009		spin_lock(&sbi->s_md_lock);
2010		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2011		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2012		spin_unlock(&sbi->s_md_lock);
2013	}
2014
2015	/* Let's just scan groups to find more or less suitable blocks */
2016	cr = ac->ac_2order ? 0 : 1;
2017	/*
2018	 * cr == 0 try to get exact allocation,
2019	 * cr == 3  try to get anything
2020	 */
2021repeat:
2022	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2023		ac->ac_criteria = cr;
2024		/*
2025		 * searching for the right group, starting
2026		 * from the goal value specified
2027		 */
2028		group = ac->ac_g_ex.fe_group;
2029
2030		for (i = 0; i < ngroups; group++, i++) {
2031			if (group == ngroups)
2032				group = 0;
2033
2034			/* This now checks without needing the buddy page */
2035			if (!ext4_mb_good_group(ac, group, cr))
2036				continue;
 
2037
2038			err = ext4_mb_load_buddy(sb, group, &e4b);
2039			if (err)
2040				goto out;
2041
2042			ext4_lock_group(sb, group);
2043
2044			/*
2045			 * We need to check again after locking the
2046			 * block group
2047			 */
2048			if (!ext4_mb_good_group(ac, group, cr)) {
 
2049				ext4_unlock_group(sb, group);
2050				ext4_mb_unload_buddy(&e4b);
2051				continue;
2052			}
2053
2054			ac->ac_groups_scanned++;
2055			if (cr == 0)
2056				ext4_mb_simple_scan_group(ac, &e4b);
2057			else if (cr == 1 && sbi->s_stripe &&
2058					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2059				ext4_mb_scan_aligned(ac, &e4b);
2060			else
2061				ext4_mb_complex_scan_group(ac, &e4b);
2062
2063			ext4_unlock_group(sb, group);
2064			ext4_mb_unload_buddy(&e4b);
2065
2066			if (ac->ac_status != AC_STATUS_CONTINUE)
2067				break;
2068		}
2069	}
2070
2071	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2072	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2073		/*
2074		 * We've been searching too long. Let's try to allocate
2075		 * the best chunk we've found so far
2076		 */
2077
2078		ext4_mb_try_best_found(ac, &e4b);
2079		if (ac->ac_status != AC_STATUS_FOUND) {
2080			/*
2081			 * Someone luckier has already allocated it.
2082			 * The only thing we can do is just take the first
2083			 * found block(s)
2084			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2085			 */
2086			ac->ac_b_ex.fe_group = 0;
2087			ac->ac_b_ex.fe_start = 0;
2088			ac->ac_b_ex.fe_len = 0;
2089			ac->ac_status = AC_STATUS_CONTINUE;
2090			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2091			cr = 3;
2092			atomic_inc(&sbi->s_mb_lost_chunks);
2093			goto repeat;
2094		}
2095	}
2096out:
2097	return err;
2098}
2099
2100static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2101{
2102	struct super_block *sb = seq->private;
2103	ext4_group_t group;
2104
2105	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2106		return NULL;
2107	group = *pos + 1;
2108	return (void *) ((unsigned long) group);
2109}
2110
2111static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2112{
2113	struct super_block *sb = seq->private;
2114	ext4_group_t group;
2115
2116	++*pos;
2117	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2118		return NULL;
2119	group = *pos + 1;
2120	return (void *) ((unsigned long) group);
2121}
2122
2123static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2124{
2125	struct super_block *sb = seq->private;
2126	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2127	int i;
2128	int err;
2129	struct ext4_buddy e4b;
2130	struct sg {
2131		struct ext4_group_info info;
2132		ext4_grpblk_t counters[16];
2133	} sg;
2134
2135	group--;
2136	if (group == 0)
2137		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2138				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2139				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2140			   "group", "free", "frags", "first",
2141			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2142			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2143
2144	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2145		sizeof(struct ext4_group_info);
2146	err = ext4_mb_load_buddy(sb, group, &e4b);
2147	if (err) {
2148		seq_printf(seq, "#%-5u: I/O error\n", group);
2149		return 0;
2150	}
2151	ext4_lock_group(sb, group);
2152	memcpy(&sg, ext4_get_group_info(sb, group), i);
2153	ext4_unlock_group(sb, group);
2154	ext4_mb_unload_buddy(&e4b);
 
2155
2156	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2157			sg.info.bb_fragments, sg.info.bb_first_free);
2158	for (i = 0; i <= 13; i++)
2159		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2160				sg.info.bb_counters[i] : 0);
2161	seq_printf(seq, " ]\n");
2162
2163	return 0;
2164}
2165
2166static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2167{
2168}
2169
2170static const struct seq_operations ext4_mb_seq_groups_ops = {
2171	.start  = ext4_mb_seq_groups_start,
2172	.next   = ext4_mb_seq_groups_next,
2173	.stop   = ext4_mb_seq_groups_stop,
2174	.show   = ext4_mb_seq_groups_show,
2175};
2176
2177static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2178{
2179	struct super_block *sb = PDE(inode)->data;
2180	int rc;
2181
2182	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2183	if (rc == 0) {
2184		struct seq_file *m = file->private_data;
2185		m->private = sb;
 
2186	}
2187	return rc;
2188
2189}
2190
2191static const struct file_operations ext4_mb_seq_groups_fops = {
2192	.owner		= THIS_MODULE,
2193	.open		= ext4_mb_seq_groups_open,
2194	.read		= seq_read,
2195	.llseek		= seq_lseek,
2196	.release	= seq_release,
2197};
2198
2199static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2200{
2201	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2202	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2203
2204	BUG_ON(!cachep);
2205	return cachep;
2206}
2207
2208/* Create and initialize ext4_group_info data for the given group. */
2209int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2210			  struct ext4_group_desc *desc)
2211{
2212	int i;
2213	int metalen = 0;
 
2214	struct ext4_sb_info *sbi = EXT4_SB(sb);
2215	struct ext4_group_info **meta_group_info;
2216	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2217
2218	/*
2219	 * First check if this group is the first group of a descriptor block.
2220	 * If so, we have to allocate a new table of pointers
2221	 * to ext4_group_info structures
2222	 */
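	/*
	 * Example of the two-level lookup used below: with 128 descriptors
	 * per descriptor block, group 300 lives in meta table 300 >> 7 = 2
	 * at slot 300 & 127 = 44 (the exact divisor depends on the block
	 * and descriptor sizes).
	 */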
2223	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2224		metalen = sizeof(*meta_group_info) <<
2225			EXT4_DESC_PER_BLOCK_BITS(sb);
2226		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2227		if (meta_group_info == NULL) {
2228			ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate mem "
2229				 "for a buddy group");
2230			goto exit_meta_group_info;
2231		}
2232		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2233			meta_group_info;
 
2234	}
2235
2236	meta_group_info =
2237		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2238	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2239
2240	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
2241	if (meta_group_info[i] == NULL) {
2242		ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate buddy mem");
2243		goto exit_group_info;
2244	}
2245	memset(meta_group_info[i], 0, kmem_cache_size(cachep));
2246	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2247		&(meta_group_info[i]->bb_state));
2248
2249	/*
2250	 * initialize bb_free to be able to skip
2251	 * empty groups without initialization
2252	 */
2253	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 
2254		meta_group_info[i]->bb_free =
2255			ext4_free_blocks_after_init(sb, group, desc);
2256	} else {
2257		meta_group_info[i]->bb_free =
2258			ext4_free_blks_count(sb, desc);
2259	}
2260
2261	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2262	init_rwsem(&meta_group_info[i]->alloc_sem);
2263	meta_group_info[i]->bb_free_root = RB_ROOT;
2264	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2265
2266#ifdef DOUBLE_CHECK
2267	{
2268		struct buffer_head *bh;
2269		meta_group_info[i]->bb_bitmap =
2270			kmalloc(sb->s_blocksize, GFP_KERNEL);
2271		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2272		bh = ext4_read_block_bitmap(sb, group);
2273		BUG_ON(bh == NULL);
2274		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2275			sb->s_blocksize);
2276		put_bh(bh);
2277	}
2278#endif
2279
2280	return 0;
2281
2282exit_group_info:
2283	/* If a meta_group_info table has been allocated, release it now */
2284	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2285		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2286		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2287	}
2288exit_meta_group_info:
2289	return -ENOMEM;
2290} /* ext4_mb_add_groupinfo */
2291
2292static int ext4_mb_init_backend(struct super_block *sb)
2293{
2294	ext4_group_t ngroups = ext4_get_groups_count(sb);
2295	ext4_group_t i;
2296	struct ext4_sb_info *sbi = EXT4_SB(sb);
2297	struct ext4_super_block *es = sbi->s_es;
2298	int num_meta_group_infos;
2299	int num_meta_group_infos_max;
2300	int array_size;
2301	struct ext4_group_desc *desc;
 
2302	struct kmem_cache *cachep;
2303
2304	/* This is the number of blocks used by GDT */
2305	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2306				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2307
2308	/*
2309	 * This is the total number of blocks used by GDT including
2310	 * the number of reserved blocks for GDT.
2311	 * The s_group_info array is allocated with this value
2312	 * to allow a clean online resize without a complex
2313	 * manipulation of pointers.
2314	 * The drawback is the unused memory when no resize
2315	 * occurs, but it's very low in terms of pages
2316	 * (see comments below).
2317	 * Need to handle this properly when META_BG resizing is allowed
2318	 */
2319	num_meta_group_infos_max = num_meta_group_infos +
2320				le16_to_cpu(es->s_reserved_gdt_blocks);
2321
2322	/*
2323	 * array_size is the size of s_group_info array. We round it
2324	 * to the next power of two because this approximation is done
2325	 * internally by kmalloc so we can have some more memory
2326	 * for free here (e.g. may be used for META_BG resize).
2327	 */
2328	array_size = 1;
2329	while (array_size < sizeof(*sbi->s_group_info) *
2330	       num_meta_group_infos_max)
2331		array_size = array_size << 1;
2332	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2333	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2334	 * So a two level scheme suffices for now. */
2335	sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
2336	if (sbi->s_group_info == NULL) {
2337		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2338		return -ENOMEM;
2339	}
2340	sbi->s_buddy_cache = new_inode(sb);
2341	if (sbi->s_buddy_cache == NULL) {
2342		ext4_msg(sb, KERN_ERR, "can't get new inode");
2343		goto err_freesgi;
2344	}
2345	/* To avoid potentially colliding with a valid on-disk inode number,
2346	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2347	 * not in the inode hash, so it should never be found by iget(), but
2348	 * this will avoid confusion if it ever shows up during debugging. */
2349	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2350	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2351	for (i = 0; i < ngroups; i++) {
 
2352		desc = ext4_get_group_desc(sb, i, NULL);
2353		if (desc == NULL) {
2354			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2355			goto err_freebuddy;
2356		}
2357		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2358			goto err_freebuddy;
2359	}
2360
2361	return 0;
2362
2363err_freebuddy:
2364	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2365	while (i-- > 0)
2366		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2367	i = num_meta_group_infos;
2368	while (i-- > 0)
2369		kfree(sbi->s_group_info[i]);
 
2370	iput(sbi->s_buddy_cache);
2371err_freesgi:
2372	ext4_kvfree(sbi->s_group_info);
2373	return -ENOMEM;
2374}
2375
2376static void ext4_groupinfo_destroy_slabs(void)
2377{
2378	int i;
2379
2380	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2381		if (ext4_groupinfo_caches[i])
2382			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2383		ext4_groupinfo_caches[i] = NULL;
2384	}
2385}
2386
2387static int ext4_groupinfo_create_slab(size_t size)
2388{
2389	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2390	int slab_size;
2391	int blocksize_bits = order_base_2(size);
2392	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2393	struct kmem_cache *cachep;
2394
2395	if (cache_index >= NR_GRPINFO_CACHES)
2396		return -EINVAL;
2397
2398	if (unlikely(cache_index < 0))
2399		cache_index = 0;
2400
2401	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2402	if (ext4_groupinfo_caches[cache_index]) {
2403		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2404		return 0;	/* Already created */
2405	}
2406
2407	slab_size = offsetof(struct ext4_group_info,
2408				bb_counters[blocksize_bits + 2]);
2409
2410	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2411					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2412					NULL);
2413
2414	ext4_groupinfo_caches[cache_index] = cachep;
2415
2416	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2417	if (!cachep) {
2418		printk(KERN_EMERG
2419		       "EXT4-fs: no memory for groupinfo slab cache\n");
2420		return -ENOMEM;
2421	}
2422
2423	return 0;
2424}
2425
2426int ext4_mb_init(struct super_block *sb, int needs_recovery)
2427{
2428	struct ext4_sb_info *sbi = EXT4_SB(sb);
2429	unsigned i, j;
2430	unsigned offset;
2431	unsigned max;
2432	int ret;
2433
2434	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2435
2436	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2437	if (sbi->s_mb_offsets == NULL) {
2438		ret = -ENOMEM;
2439		goto out;
2440	}
2441
2442	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2443	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2444	if (sbi->s_mb_maxs == NULL) {
2445		ret = -ENOMEM;
2446		goto out;
2447	}
2448
2449	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2450	if (ret < 0)
2451		goto out;
2452
2453	/* order 0 is regular bitmap */
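	/*
	 * Example layout for a 4 KiB block size (s_blocksize_bits = 12):
	 * s_mb_maxs[]    = 32768, 16384, 8192, 4096, ... bits per order,
	 * s_mb_offsets[] = 0, 0, 2048, 3072, ... byte offsets of each
	 * order's bitmap within the buddy block (order 0 uses the regular
	 * on-disk block bitmap instead).
	 */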
2454	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2455	sbi->s_mb_offsets[0] = 0;
2456
2457	i = 1;
2458	offset = 0;
 
2459	max = sb->s_blocksize << 2;
2460	do {
2461		sbi->s_mb_offsets[i] = offset;
2462		sbi->s_mb_maxs[i] = max;
2463		offset += 1 << (sb->s_blocksize_bits - i);
 
2464		max = max >> 1;
2465		i++;
2466	} while (i <= sb->s_blocksize_bits + 1);
2467
2468	spin_lock_init(&sbi->s_md_lock);
2469	spin_lock_init(&sbi->s_bal_lock);
2470
2471	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2472	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2473	sbi->s_mb_stats = MB_DEFAULT_STATS;
2474	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2475	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2476	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2477	/*
2478	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2479	 * to the lowest multiple of s_stripe which is bigger than
2480	 * the s_mb_group_prealloc as determined above. We want
2481	 * the preallocation size to be an exact multiple of the
2482	 * RAID stripe size so that preallocations don't fragment
2483	 * the stripes.
2484	 */
2485	if (sbi->s_stripe > 1) {
2486		sbi->s_mb_group_prealloc = roundup(
2487			sbi->s_mb_group_prealloc, sbi->s_stripe);
2488	}
2489
2490	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2491	if (sbi->s_locality_groups == NULL) {
2492		ret = -ENOMEM;
2493		goto out;
2494	}
2495	for_each_possible_cpu(i) {
2496		struct ext4_locality_group *lg;
2497		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2498		mutex_init(&lg->lg_mutex);
2499		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2500			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2501		spin_lock_init(&lg->lg_prealloc_lock);
2502	}
2503
2504	/* init file for buddy data */
2505	ret = ext4_mb_init_backend(sb);
2506	if (ret != 0) {
2507		goto out;
2508	}
2509
2510	if (sbi->s_proc)
2511		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2512				 &ext4_mb_seq_groups_fops, sb);
2513
2514	if (sbi->s_journal)
2515		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
 
2516out:
2517	if (ret) {
2518		kfree(sbi->s_mb_offsets);
2519		kfree(sbi->s_mb_maxs);
2520	}
2521	return ret;
2522}
2523
2524/* needs to be called with the ext4 group lock held */
2525static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2526{
2527	struct ext4_prealloc_space *pa;
2528	struct list_head *cur, *tmp;
2529	int count = 0;
2530
2531	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2532		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2533		list_del(&pa->pa_group_list);
2534		count++;
2535		kmem_cache_free(ext4_pspace_cachep, pa);
2536	}
2537	if (count)
2538		mb_debug(1, "mballoc: %u PAs left\n", count);
2539
2540}
2541
2542int ext4_mb_release(struct super_block *sb)
2543{
2544	ext4_group_t ngroups = ext4_get_groups_count(sb);
2545	ext4_group_t i;
2546	int num_meta_group_infos;
2547	struct ext4_group_info *grinfo;
2548	struct ext4_sb_info *sbi = EXT4_SB(sb);
2549	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2550
2551	if (sbi->s_group_info) {
2552		for (i = 0; i < ngroups; i++) {
 
2553			grinfo = ext4_get_group_info(sb, i);
2554#ifdef DOUBLE_CHECK
2555			kfree(grinfo->bb_bitmap);
2556#endif
2557			ext4_lock_group(sb, i);
2558			ext4_mb_cleanup_pa(grinfo);
2559			ext4_unlock_group(sb, i);
2560			kmem_cache_free(cachep, grinfo);
2561		}
2562		num_meta_group_infos = (ngroups +
2563				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2564			EXT4_DESC_PER_BLOCK_BITS(sb);
2565		for (i = 0; i < num_meta_group_infos; i++)
2566			kfree(sbi->s_group_info[i]);
2567		ext4_kvfree(sbi->s_group_info);
 
2568	}
2569	kfree(sbi->s_mb_offsets);
2570	kfree(sbi->s_mb_maxs);
2571	if (sbi->s_buddy_cache)
2572		iput(sbi->s_buddy_cache);
2573	if (sbi->s_mb_stats) {
2574		ext4_msg(sb, KERN_INFO,
2575		       "mballoc: %u blocks %u reqs (%u success)",
2576				atomic_read(&sbi->s_bal_allocated),
2577				atomic_read(&sbi->s_bal_reqs),
2578				atomic_read(&sbi->s_bal_success));
2579		ext4_msg(sb, KERN_INFO,
2580		      "mballoc: %u extents scanned, %u goal hits, "
2581				"%u 2^N hits, %u breaks, %u lost",
2582				atomic_read(&sbi->s_bal_ex_scanned),
 
2583				atomic_read(&sbi->s_bal_goals),
2584				atomic_read(&sbi->s_bal_2orders),
2585				atomic_read(&sbi->s_bal_breaks),
2586				atomic_read(&sbi->s_mb_lost_chunks));
2587		ext4_msg(sb, KERN_INFO,
2588		       "mballoc: %lu generated and it took %Lu",
2589				sbi->s_mb_buddies_generated,
2590				sbi->s_mb_generation_time);
2591		ext4_msg(sb, KERN_INFO,
2592		       "mballoc: %u preallocated, %u discarded",
2593				atomic_read(&sbi->s_mb_preallocated),
2594				atomic_read(&sbi->s_mb_discarded));
2595	}
2596
2597	free_percpu(sbi->s_locality_groups);
2598	if (sbi->s_proc)
2599		remove_proc_entry("mb_groups", sbi->s_proc);
2600
2601	return 0;
2602}
2603
2604static inline int ext4_issue_discard(struct super_block *sb,
2605		ext4_group_t block_group, ext4_grpblk_t block, int count)
 
2606{
2607	ext4_fsblk_t discard_block;
2608
2609	discard_block = block + ext4_group_first_block_no(sb, block_group);
2610	trace_ext4_discard_blocks(sb,
2611			(unsigned long long) discard_block, count);
2612	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2613}
2614
2615/*
2616 * This function is called by the jbd2 layer once the commit has finished,
2617 * so we know we can free the blocks that were released with that commit.
2618 */
2619static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2620{
2621	struct super_block *sb = journal->j_private;
2622	struct ext4_buddy e4b;
2623	struct ext4_group_info *db;
2624	int err, count = 0, count2 = 0;
2625	struct ext4_free_data *entry;
2626	struct list_head *l, *ltmp;
2627
2628	list_for_each_safe(l, ltmp, &txn->t_private_list) {
2629		entry = list_entry(l, struct ext4_free_data, list);
2630
2631		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2632			 entry->count, entry->group, entry);
2633
2634		if (test_opt(sb, DISCARD))
2635			ext4_issue_discard(sb, entry->group,
2636					   entry->start_blk, entry->count);
2637
2638		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2639		/* we expect to find existing buddy because it's pinned */
2640		BUG_ON(err != 0);
2641
2642		db = e4b.bd_info;
2643		/* there are blocks to put in buddy to make them really free */
2644		count += entry->count;
2645		count2++;
2646		ext4_lock_group(sb, entry->group);
2647		/* Take it out of per group rb tree */
2648		rb_erase(&entry->node, &(db->bb_free_root));
2649		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2650
2651		/*
2652		 * Clear the trimmed flag for the group so that the next
2653		 * ext4_trim_fs can trim it.
2654		 * If the volume is mounted with -o discard, online discard
2655		 * is supported and the free blocks will be trimmed online.
2656		 */
2657		if (!test_opt(sb, DISCARD))
2658			EXT4_MB_GRP_CLEAR_TRIMMED(db);
2659
2660		if (!db->bb_free_root.rb_node) {
2661			/* No more items in the per group rb tree
2662			 * balance refcounts from ext4_mb_free_metadata()
2663			 */
2664			page_cache_release(e4b.bd_buddy_page);
2665			page_cache_release(e4b.bd_bitmap_page);
2666		}
2667		ext4_unlock_group(sb, entry->group);
2668		kmem_cache_free(ext4_free_ext_cachep, entry);
2669		ext4_mb_unload_buddy(&e4b);
2670	}
2671
2672	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
 
2673}
2674
2675#ifdef CONFIG_EXT4_DEBUG
2676u8 mb_enable_debug __read_mostly;
2677
2678static struct dentry *debugfs_dir;
2679static struct dentry *debugfs_debug;
2680
2681static void __init ext4_create_debugfs_entry(void)
2682{
2683	debugfs_dir = debugfs_create_dir("ext4", NULL);
2684	if (debugfs_dir)
2685		debugfs_debug = debugfs_create_u8("mballoc-debug",
2686						  S_IRUGO | S_IWUSR,
2687						  debugfs_dir,
2688						  &mb_enable_debug);
2689}
2690
2691static void ext4_remove_debugfs_entry(void)
2692{
2693	debugfs_remove(debugfs_debug);
2694	debugfs_remove(debugfs_dir);
2695}
2696
2697#else
2698
2699static void __init ext4_create_debugfs_entry(void)
2700{
2701}
2702
2703static void ext4_remove_debugfs_entry(void)
2704{
2705}
2706
2707#endif
2708
2709int __init ext4_init_mballoc(void)
2710{
2711	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2712					SLAB_RECLAIM_ACCOUNT);
2713	if (ext4_pspace_cachep == NULL)
2714		return -ENOMEM;
2715
2716	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2717				    SLAB_RECLAIM_ACCOUNT);
2718	if (ext4_ac_cachep == NULL) {
2719		kmem_cache_destroy(ext4_pspace_cachep);
2720		return -ENOMEM;
2721	}
2722
2723	ext4_free_ext_cachep = KMEM_CACHE(ext4_free_data,
2724					  SLAB_RECLAIM_ACCOUNT);
2725	if (ext4_free_ext_cachep == NULL) {
2726		kmem_cache_destroy(ext4_pspace_cachep);
2727		kmem_cache_destroy(ext4_ac_cachep);
2728		return -ENOMEM;
2729	}
2730	ext4_create_debugfs_entry();
2731	return 0;
2732}
2733
2734void ext4_exit_mballoc(void)
2735{
2736	/*
2737	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2738	 * before destroying the slab cache.
2739	 */
2740	rcu_barrier();
2741	kmem_cache_destroy(ext4_pspace_cachep);
2742	kmem_cache_destroy(ext4_ac_cachep);
2743	kmem_cache_destroy(ext4_free_ext_cachep);
2744	ext4_groupinfo_destroy_slabs();
2745	ext4_remove_debugfs_entry();
2746}
2747
2748
2749/*
2750 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2751 * Returns 0 on success or an error code
2752 */
2753static noinline_for_stack int
2754ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2755				handle_t *handle, unsigned int reserv_blks)
2756{
2757	struct buffer_head *bitmap_bh = NULL;
2758	struct ext4_group_desc *gdp;
2759	struct buffer_head *gdp_bh;
2760	struct ext4_sb_info *sbi;
2761	struct super_block *sb;
2762	ext4_fsblk_t block;
2763	int err, len;
2764
2765	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2766	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2767
2768	sb = ac->ac_sb;
2769	sbi = EXT4_SB(sb);
2770
2771	err = -EIO;
2772	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2773	if (!bitmap_bh)
2774		goto out_err;
 
2775
2776	err = ext4_journal_get_write_access(handle, bitmap_bh);
2777	if (err)
2778		goto out_err;
2779
2780	err = -EIO;
2781	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2782	if (!gdp)
2783		goto out_err;
2784
2785	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2786			ext4_free_blks_count(sb, gdp));
2787
2788	err = ext4_journal_get_write_access(handle, gdp_bh);
 
2789	if (err)
2790		goto out_err;
2791
2792	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2793
2794	len = ac->ac_b_ex.fe_len;
2795	if (!ext4_data_block_valid(sbi, block, len)) {
2796		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2797			   "fs metadata\n", block, block+len);
2798		/* The filesystem is mounted not to panic on error:
2799		 * fix the bitmap and repeat the block allocation.
2800		 * We leak some of the blocks here.
2801		 */
2802		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2803		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2804			      ac->ac_b_ex.fe_len);
2805		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2806		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2807		if (!err)
2808			err = -EAGAIN;
2809		goto out_err;
2810	}
2811
2812	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2813#ifdef AGGRESSIVE_CHECK
2814	{
2815		int i;
2816		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2817			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2818						bitmap_bh->b_data));
2819		}
2820	}
2821#endif
2822	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2823		      ac->ac_b_ex.fe_len);
2824	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 
2825		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2826		ext4_free_blks_set(sb, gdp,
2827					ext4_free_blocks_after_init(sb,
2828					ac->ac_b_ex.fe_group, gdp));
2829	}
2830	len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
2831	ext4_free_blks_set(sb, gdp, len);
2832	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
 
2833
2834	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2835	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2836	/*
2837	 * Now reduce the dirty block count also. Should not go negative
2838	 */
2839	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2840		/* release all the reserved blocks if non delalloc */
2841		percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
 
2842
2843	if (sbi->s_log_groups_per_flex) {
2844		ext4_group_t flex_group = ext4_flex_group(sbi,
2845							  ac->ac_b_ex.fe_group);
2846		atomic_sub(ac->ac_b_ex.fe_len,
2847			   &sbi->s_flex_groups[flex_group].free_blocks);
 
2848	}
2849
2850	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2851	if (err)
2852		goto out_err;
2853	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2854
2855out_err:
2856	ext4_mark_super_dirty(sb);
2857	brelse(bitmap_bh);
2858	return err;
2859}
2860
2861/*
2862 * Here we normalize the request for a locality group.
2863 * Group requests are normalized to s_mb_group_prealloc, which goes to
2864 * s_stripe if we set the same via mount option.
2865 * s_mb_group_prealloc can be configured via
2866 * /sys/fs/ext4/<partition>/mb_group_prealloc
2867 *
2868 * XXX: should we try to preallocate more than the group has now?
2869 */
2870static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2871{
2872	struct super_block *sb = ac->ac_sb;
2873	struct ext4_locality_group *lg = ac->ac_lg;
2874
2875	BUG_ON(lg == NULL);
2876	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2877	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2878		current->pid, ac->ac_g_ex.fe_len);
2879}
2880
2881/*
2882 * Normalization means making the request better in terms of
2883 * size and alignment
2884 */
2885static noinline_for_stack void
2886ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2887				struct ext4_allocation_request *ar)
2888{
 
2889	int bsbits, max;
2890	ext4_lblk_t end;
2891	loff_t size, orig_size, start_off;
 
2892	ext4_lblk_t start;
2893	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2894	struct ext4_prealloc_space *pa;
2895
2896	/* only normalize data requests; metadata requests
2897	   do not need preallocation */
2898	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2899		return;
2900
2901	/* sometimes the caller may want exact blocks */
2902	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2903		return;
2904
2905	/* caller may indicate that preallocation isn't
2906	 * required (it's a tail, for example) */
2907	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2908		return;
2909
2910	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2911		ext4_mb_normalize_group_request(ac);
2912		return ;
2913	}
2914
2915	bsbits = ac->ac_sb->s_blocksize_bits;
2916
2917	/* first, let's learn the actual file size
2918	 * assuming the current request is allocated */
2919	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2920	size = size << bsbits;
2921	if (size < i_size_read(ac->ac_inode))
2922		size = i_size_read(ac->ac_inode);
2923	orig_size = size;
2924
2925	/* max size of free chunks */
2926	max = 2 << bsbits;
2927
2928#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2929		(req <= (size) || max <= (chunk_size))
2930
2931	/* first, try to predict filesize */
2932	/* XXX: should this table be tunable? */
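	/*
	 * Example: a write that grows the file to 100 KiB falls into the
	 * "<= 128 * 1024" bucket below, so the goal is rounded up to
	 * 128 KiB (32 blocks with 4 KiB blocks); the larger buckets also
	 * align start_off to the bucket size (2 MiB, 4 MiB, 8 MiB).
	 */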
2933	start_off = 0;
2934	if (size <= 16 * 1024) {
2935		size = 16 * 1024;
2936	} else if (size <= 32 * 1024) {
2937		size = 32 * 1024;
2938	} else if (size <= 64 * 1024) {
2939		size = 64 * 1024;
2940	} else if (size <= 128 * 1024) {
2941		size = 128 * 1024;
2942	} else if (size <= 256 * 1024) {
2943		size = 256 * 1024;
2944	} else if (size <= 512 * 1024) {
2945		size = 512 * 1024;
2946	} else if (size <= 1024 * 1024) {
2947		size = 1024 * 1024;
2948	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2949		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2950						(21 - bsbits)) << 21;
2951		size = 2 * 1024 * 1024;
2952	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2953		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2954							(22 - bsbits)) << 22;
2955		size = 4 * 1024 * 1024;
2956	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2957					(8<<20)>>bsbits, max, 8 * 1024)) {
2958		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2959							(23 - bsbits)) << 23;
2960		size = 8 * 1024 * 1024;
2961	} else {
2962		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2963		size	  = ac->ac_o_ex.fe_len << bsbits;
 
2964	}
2965	size = size >> bsbits;
2966	start = start_off >> bsbits;
2967
2968	/* don't cover already allocated blocks in selected range */
2969	if (ar->pleft && start <= ar->lleft) {
2970		size -= ar->lleft + 1 - start;
2971		start = ar->lleft + 1;
2972	}
2973	if (ar->pright && start + size - 1 >= ar->lright)
2974		size -= start + size - ar->lright;
2975
2976	end = start + size;
2977
2978	/* check we don't cross already preallocated blocks */
2979	rcu_read_lock();
2980	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2981		ext4_lblk_t pa_end;
2982
2983		if (pa->pa_deleted)
2984			continue;
2985		spin_lock(&pa->pa_lock);
2986		if (pa->pa_deleted) {
2987			spin_unlock(&pa->pa_lock);
2988			continue;
2989		}
2990
2991		pa_end = pa->pa_lstart + pa->pa_len;
 
2992
2993		/* PA must not overlap original request */
2994		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2995			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2996
2997		/* skip PAs this normalized request doesn't overlap with */
2998		if (pa->pa_lstart >= end || pa_end <= start) {
2999			spin_unlock(&pa->pa_lock);
3000			continue;
3001		}
3002		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3003
3004		/* adjust start or end to be adjacent to this pa */
3005		if (pa_end <= ac->ac_o_ex.fe_logical) {
3006			BUG_ON(pa_end < start);
3007			start = pa_end;
3008		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3009			BUG_ON(pa->pa_lstart > end);
3010			end = pa->pa_lstart;
3011		}
3012		spin_unlock(&pa->pa_lock);
3013	}
3014	rcu_read_unlock();
3015	size = end - start;
3016
3017	/* XXX: extra loop to check we really don't overlap preallocations */
3018	rcu_read_lock();
3019	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3020		ext4_lblk_t pa_end;
 
3021		spin_lock(&pa->pa_lock);
3022		if (pa->pa_deleted == 0) {
3023			pa_end = pa->pa_lstart + pa->pa_len;
 
3024			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3025		}
3026		spin_unlock(&pa->pa_lock);
3027	}
3028	rcu_read_unlock();
3029
3030	if (start + size <= ac->ac_o_ex.fe_logical &&
3031			start > ac->ac_o_ex.fe_logical) {
3032		ext4_msg(ac->ac_sb, KERN_ERR,
3033			 "start %lu, size %lu, fe_logical %lu",
3034			 (unsigned long) start, (unsigned long) size,
3035			 (unsigned long) ac->ac_o_ex.fe_logical);
 
3036	}
3037	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3038			start > ac->ac_o_ex.fe_logical);
3039	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3040
3041	/* now prepare goal request */
3042
3043	/* XXX: is it better to align blocks WRT to logical
3044	 * placement or satisfy big request as is */
3045	ac->ac_g_ex.fe_logical = start;
3046	ac->ac_g_ex.fe_len = size;
3047
3048	/* define goal start in order to merge */
3049	if (ar->pright && (ar->lright == (start + size))) {
3050		/* merge to the right */
3051		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3052						&ac->ac_f_ex.fe_group,
3053						&ac->ac_f_ex.fe_start);
3054		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3055	}
3056	if (ar->pleft && (ar->lleft + 1 == start)) {
3057		/* merge to the left */
3058		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3059						&ac->ac_f_ex.fe_group,
3060						&ac->ac_f_ex.fe_start);
3061		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3062	}
3063
3064	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3065		(unsigned) orig_size, (unsigned) start);
3066}
3067
3068static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3069{
3070	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3071
3072	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3073		atomic_inc(&sbi->s_bal_reqs);
3074		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3075		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3076			atomic_inc(&sbi->s_bal_success);
3077		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
 
3078		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3079				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3080			atomic_inc(&sbi->s_bal_goals);
3081		if (ac->ac_found > sbi->s_mb_max_to_scan)
3082			atomic_inc(&sbi->s_bal_breaks);
3083	}
3084
3085	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3086		trace_ext4_mballoc_alloc(ac);
3087	else
3088		trace_ext4_mballoc_prealloc(ac);
3089}
3090
3091/*
3092 * Called on failure; free up any blocks from the inode PA for this
3093 * context.  We don't need this for MB_GROUP_PA because we only change
3094 * pa_free in ext4_mb_release_context(), but on failure, we've already
3095 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3096 */
3097static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3098{
3099	struct ext4_prealloc_space *pa = ac->ac_pa;
3100	int len;
 
3101
3102	if (pa && pa->pa_type == MB_INODE_PA) {
3103		len = ac->ac_b_ex.fe_len;
3104		pa->pa_free += len;
3105	}
3106
 
3107}
3108
3109/*
3110 * use blocks preallocated to inode
3111 */
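/*
 * Example of the mapping below: with pa_lstart = 100, pa_pstart = 5000,
 * pa_len = 16 and a request for 8 blocks at logical 104, start becomes
 * 5004 and len 8; if the request extended past the PA (say logical 110),
 * len would be clipped to the 6 blocks the PA still covers.
 */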
3112static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3113				struct ext4_prealloc_space *pa)
3114{
 
3115	ext4_fsblk_t start;
3116	ext4_fsblk_t end;
3117	int len;
3118
3119	/* found preallocated blocks, use them */
3120	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3121	end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3122	len = end - start;
 
3123	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3124					&ac->ac_b_ex.fe_start);
3125	ac->ac_b_ex.fe_len = len;
3126	ac->ac_status = AC_STATUS_FOUND;
3127	ac->ac_pa = pa;
3128
3129	BUG_ON(start < pa->pa_pstart);
3130	BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3131	BUG_ON(pa->pa_free < len);
3132	pa->pa_free -= len;
3133
3134	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3135}
3136
3137/*
3138 * use blocks preallocated to locality group
3139 */
3140static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3141				struct ext4_prealloc_space *pa)
3142{
3143	unsigned int len = ac->ac_o_ex.fe_len;
3144
3145	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3146					&ac->ac_b_ex.fe_group,
3147					&ac->ac_b_ex.fe_start);
3148	ac->ac_b_ex.fe_len = len;
3149	ac->ac_status = AC_STATUS_FOUND;
3150	ac->ac_pa = pa;
3151
3152	/* we don't correct pa_pstart or pa_len here to avoid a
3153	 * possible race when the group is being loaded concurrently;
3154	 * instead we correct the pa later, after blocks are marked
3155	 * in the on-disk bitmap -- see ext4_mb_release_context().
3156	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3157	 */
3158	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
 
3159}
3160
3161/*
3162 * Return the prealloc space that has minimal distance
3163 * from the goal block. @cpa is the prealloc
3164 * space with the currently known minimal distance
3165 * from the goal block.
3166 */
3167static struct ext4_prealloc_space *
3168ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3169			struct ext4_prealloc_space *pa,
3170			struct ext4_prealloc_space *cpa)
3171{
3172	ext4_fsblk_t cur_distance, new_distance;
3173
3174	if (cpa == NULL) {
3175		atomic_inc(&pa->pa_count);
3176		return pa;
3177	}
3178	cur_distance = abs(goal_block - cpa->pa_pstart);
3179	new_distance = abs(goal_block - pa->pa_pstart);
3180
3181	if (cur_distance <= new_distance)
3182		return cpa;
3183
3184	/* drop the previous reference */
3185	atomic_dec(&cpa->pa_count);
3186	atomic_inc(&pa->pa_count);
3187	return pa;
3188}
3189
3190/*
3191 * search goal blocks in preallocated space
3192 */
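/*
 * Two sources are tried in order: first the inode's own i_prealloc_list,
 * looking for a PA whose logical range covers the request; then, for
 * group allocations, the locality-group lists indexed by request order,
 * picking the candidate PA closest to the goal block via
 * ext4_mb_check_group_pa().
 */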
3193static noinline_for_stack int
3194ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3195{
 
3196	int order, i;
3197	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3198	struct ext4_locality_group *lg;
3199	struct ext4_prealloc_space *pa, *cpa = NULL;
3200	ext4_fsblk_t goal_block;
3201
3202	/* only data can be preallocated */
3203	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3204		return 0;
3205
3206	/* first, try per-file preallocation */
3207	rcu_read_lock();
3208	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3209
3210		/* all fields in this condition don't change,
3211		 * so we can skip locking for them */
3212		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3213			ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
 
3214			continue;
3215
3216		/* non-extent files can't have physical blocks past 2^32 */
3217		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3218			pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
 
3219			continue;
3220
3221		/* found preallocated blocks, use them */
3222		spin_lock(&pa->pa_lock);
3223		if (pa->pa_deleted == 0 && pa->pa_free) {
3224			atomic_inc(&pa->pa_count);
3225			ext4_mb_use_inode_pa(ac, pa);
3226			spin_unlock(&pa->pa_lock);
3227			ac->ac_criteria = 10;
3228			rcu_read_unlock();
3229			return 1;
3230		}
3231		spin_unlock(&pa->pa_lock);
3232	}
3233	rcu_read_unlock();
3234
3235	/* can we use group allocation? */
3236	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3237		return 0;
3238
3239	/* inode may have no locality group for some reason */
3240	lg = ac->ac_lg;
3241	if (lg == NULL)
3242		return 0;
3243	order  = fls(ac->ac_o_ex.fe_len) - 1;
3244	if (order > PREALLOC_TB_SIZE - 1)
3245		/* The max size of hash table is PREALLOC_TB_SIZE */
3246		order = PREALLOC_TB_SIZE - 1;
3247
3248	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3249	/*
3250	 * search for the prealloc space that has the
3251	 * minimal distance from the goal block.
3252	 */
3253	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3254		rcu_read_lock();
3255		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3256					pa_inode_list) {
3257			spin_lock(&pa->pa_lock);
3258			if (pa->pa_deleted == 0 &&
3259					pa->pa_free >= ac->ac_o_ex.fe_len) {
3260
3261				cpa = ext4_mb_check_group_pa(goal_block,
3262								pa, cpa);
3263			}
3264			spin_unlock(&pa->pa_lock);
3265		}
3266		rcu_read_unlock();
3267	}
3268	if (cpa) {
3269		ext4_mb_use_group_pa(ac, cpa);
3270		ac->ac_criteria = 20;
3271		return 1;
3272	}
3273	return 0;
3274}
3275
3276/*
3277 * The function goes through all blocks freed in the group
3278 * but not yet committed and marks them used in the in-core bitmap.
3279 * The buddy must be generated from this bitmap.
3280 * Needs to be called with the ext4 group lock held.
3281 */
3282static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3283						ext4_group_t group)
3284{
3285	struct rb_node *n;
3286	struct ext4_group_info *grp;
3287	struct ext4_free_data *entry;
3288
3289	grp = ext4_get_group_info(sb, group);
3290	n = rb_first(&(grp->bb_free_root));
3291
3292	while (n) {
3293		entry = rb_entry(n, struct ext4_free_data, node);
3294		ext4_set_bits(bitmap, entry->start_blk, entry->count);
3295		n = rb_next(n);
3296	}
3297	return;
3298}
3299
3300/*
3301 * The function goes through all preallocations in this group and marks them
3302 * used in the in-core bitmap. The buddy must be generated from this bitmap.
3303 * Needs to be called with the ext4 group lock held.
3304 */
3305static noinline_for_stack
3306void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3307					ext4_group_t group)
3308{
3309	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3310	struct ext4_prealloc_space *pa;
3311	struct list_head *cur;
3312	ext4_group_t groupnr;
3313	ext4_grpblk_t start;
3314	int preallocated = 0;
3315	int count = 0;
3316	int len;
3317
3318	/* All forms of preallocation discard first load the group,
3319	 * so the only competing code is preallocation use.
3320	 * We don't need any locking here.
3321	 * Notice that we do NOT ignore preallocations with pa_deleted set;
3322	 * otherwise we could leave used blocks available for
3323	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3324	 * is dropping the preallocation.
3325	 */
3326	list_for_each(cur, &grp->bb_prealloc_list) {
3327		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3328		spin_lock(&pa->pa_lock);
3329		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3330					     &groupnr, &start);
3331		len = pa->pa_len;
3332		spin_unlock(&pa->pa_lock);
3333		if (unlikely(len == 0))
3334			continue;
3335		BUG_ON(groupnr != group);
3336		ext4_set_bits(bitmap, start, len);
3337		preallocated += len;
3338		count++;
3339	}
3340	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3341}
3342
3343static void ext4_mb_pa_callback(struct rcu_head *head)
3344{
3345	struct ext4_prealloc_space *pa;
3346	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3347	kmem_cache_free(ext4_pspace_cachep, pa);
3348}
3349
3350/*
3351 * drops a reference to preallocated space descriptor
3352 * if this was the last reference and the space is consumed
3353 */
3354static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3355			struct super_block *sb, struct ext4_prealloc_space *pa)
3356{
3357	ext4_group_t grp;
3358	ext4_fsblk_t grp_blk;
3359
3360	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3361		return;
3362
3363	/* in this short window concurrent discard can set pa_deleted */
3364	spin_lock(&pa->pa_lock);
3365	if (pa->pa_deleted == 1) {
3366		spin_unlock(&pa->pa_lock);
3367		return;
3368	}
3369
3370	pa->pa_deleted = 1;
3371	spin_unlock(&pa->pa_lock);
3372
3373	grp_blk = pa->pa_pstart;
3374	/*
3375	 * If doing group-based preallocation, pa_pstart may be in the
3376	 * next group when pa is used up
3377	 */
3378	if (pa->pa_type == MB_GROUP_PA)
3379		grp_blk--;
3380
3381	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3382
3383	/*
3384	 * possible race:
3385	 *
3386	 *  P1 (buddy init)			P2 (regular allocation)
3387	 *					find block B in PA
3388	 *  copy on-disk bitmap to buddy
3389	 *  					mark B in on-disk bitmap
3390	 *					drop PA from group
3391	 *  mark all PAs in buddy
3392	 *
3393	 * thus, P1 initializes buddy with B available. to prevent this
3394	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3395	 * against that pair
3396	 */
3397	ext4_lock_group(sb, grp);
3398	list_del(&pa->pa_group_list);
3399	ext4_unlock_group(sb, grp);
3400
3401	spin_lock(pa->pa_obj_lock);
3402	list_del_rcu(&pa->pa_inode_list);
3403	spin_unlock(pa->pa_obj_lock);
3404
3405	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3406}
3407
3408/*
3409 * creates new preallocated space for given inode
3410 */
3411static noinline_for_stack int
3412ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3413{
3414	struct super_block *sb = ac->ac_sb;
 
3415	struct ext4_prealloc_space *pa;
3416	struct ext4_group_info *grp;
3417	struct ext4_inode_info *ei;
3418
3419	/* preallocate only when found space is larger than requested */
3420	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3421	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3422	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
 
3423
3424	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3425	if (pa == NULL)
3426		return -ENOMEM;
3427
3428	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3429		int winl;
3430		int wins;
3431		int win;
3432		int offs;
3433
3434		/* we can't allocate as much as the normalizer wants,
3435		 * so the found space must get a proper lstart
3436		 * to cover the original request */
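		/*
		 * Example: with goal start 100 / len 32, original request
		 * 110 / len 8 and a best extent of 16 blocks, winl = 10 and
		 * wins = 8, so win = 8 and the PA is placed at logical 102,
		 * which still covers the requested blocks 110..117.
		 */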
3437		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3438		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3439
3440		/* we're limited by the original request in that
3441		 * the logical block must be covered anyway;
3442		 * winl is the window within which we can move our chunk */
3443		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3444
3445		/* also, we should cover whole original request */
3446		wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3447
3448		/* the smallest one defines real window */
3449		win = min(winl, wins);
3450
3451		offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
 
3452		if (offs && offs < win)
3453			win = offs;
3454
3455		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
 
3456		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3457		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3458	}
3459
3460	/* preallocation can change ac_b_ex, thus we store actually
3461	 * allocated blocks for history */
3462	ac->ac_f_ex = ac->ac_b_ex;
3463
3464	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3465	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3466	pa->pa_len = ac->ac_b_ex.fe_len;
3467	pa->pa_free = pa->pa_len;
3468	atomic_set(&pa->pa_count, 1);
3469	spin_lock_init(&pa->pa_lock);
3470	INIT_LIST_HEAD(&pa->pa_inode_list);
3471	INIT_LIST_HEAD(&pa->pa_group_list);
3472	pa->pa_deleted = 0;
3473	pa->pa_type = MB_INODE_PA;
3474
3475	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3476			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3477	trace_ext4_mb_new_inode_pa(ac, pa);
3478
3479	ext4_mb_use_inode_pa(ac, pa);
3480	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3481
3482	ei = EXT4_I(ac->ac_inode);
3483	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3484
3485	pa->pa_obj_lock = &ei->i_prealloc_lock;
3486	pa->pa_inode = ac->ac_inode;
3487
3488	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3489	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3490	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3491
3492	spin_lock(pa->pa_obj_lock);
3493	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3494	spin_unlock(pa->pa_obj_lock);
3495
3496	return 0;
3497}
3498
3499/*
3500 * creates new preallocated space for the locality group this inode belongs to
3501 */
3502static noinline_for_stack int
3503ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3504{
3505	struct super_block *sb = ac->ac_sb;
3506	struct ext4_locality_group *lg;
3507	struct ext4_prealloc_space *pa;
3508	struct ext4_group_info *grp;
3509
3510	/* preallocate only when found space is larger than requested */
3511	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3512	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3513	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
 
3514
3515	BUG_ON(ext4_pspace_cachep == NULL);
3516	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3517	if (pa == NULL)
3518		return -ENOMEM;
3519
3520	/* preallocation can change ac_b_ex, thus we store actually
3521	 * allocated blocks for history */
3522	ac->ac_f_ex = ac->ac_b_ex;
3523
3524	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3525	pa->pa_lstart = pa->pa_pstart;
3526	pa->pa_len = ac->ac_b_ex.fe_len;
3527	pa->pa_free = pa->pa_len;
3528	atomic_set(&pa->pa_count, 1);
3529	spin_lock_init(&pa->pa_lock);
3530	INIT_LIST_HEAD(&pa->pa_inode_list);
3531	INIT_LIST_HEAD(&pa->pa_group_list);
3532	pa->pa_deleted = 0;
3533	pa->pa_type = MB_GROUP_PA;
3534
3535	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3536			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3537	trace_ext4_mb_new_group_pa(ac, pa);
3538
3539	ext4_mb_use_group_pa(ac, pa);
3540	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3541
3542	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3543	lg = ac->ac_lg;
3544	BUG_ON(lg == NULL);
3545
3546	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3547	pa->pa_inode = NULL;
3548
3549	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3550	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3551	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3552
3553	/*
3554	 * We will later add the new pa to the right bucket
3555	 * after updating the pa_free in ext4_mb_release_context
3556	 */
3557	return 0;
3558}
3559
3560static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3561{
3562	int err;
3563
3564	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3565		err = ext4_mb_new_group_pa(ac);
3566	else
3567		err = ext4_mb_new_inode_pa(ac);
3568	return err;
3569}
3570
3571/*
3572 * finds all unused blocks in on-disk bitmap, frees them in
3573 * in-core bitmap and buddy.
3574 * @pa must be unlinked from inode and group lists, so that
3575 * nobody else can find/use it.
3576 * the caller MUST hold group/inode locks.
3577 * TODO: optimize the case when there are no in-core structures yet
3578 */
3579static noinline_for_stack int
3580ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3581			struct ext4_prealloc_space *pa)
3582{
3583	struct super_block *sb = e4b->bd_sb;
3584	struct ext4_sb_info *sbi = EXT4_SB(sb);
3585	unsigned int end;
3586	unsigned int next;
3587	ext4_group_t group;
3588	ext4_grpblk_t bit;
3589	unsigned long long grp_blk_start;
3590	int err = 0;
3591	int free = 0;
3592
3593	BUG_ON(pa->pa_deleted == 0);
3594	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3595	grp_blk_start = pa->pa_pstart - bit;
3596	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3597	end = bit + pa->pa_len;
3598
3599	while (bit < end) {
3600		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3601		if (bit >= end)
3602			break;
3603		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3604		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3605			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3606			 (unsigned) next - bit, (unsigned) group);
3607		free += next - bit;
3608
3609		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3610		trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
3611					       next - bit);
3612		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3613		bit = next + 1;
3614	}
3615	if (free != pa->pa_free) {
3616		ext4_msg(e4b->bd_sb, KERN_CRIT,
3617			 "pa %p: logic %lu, phys. %lu, len %lu",
3618			 pa, (unsigned long) pa->pa_lstart,
3619			 (unsigned long) pa->pa_pstart,
3620			 (unsigned long) pa->pa_len);
3621		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3622					free, pa->pa_free);
3623		/*
3624		 * pa is already deleted so we use the value obtained
3625		 * from the bitmap and continue.
3626		 */
3627	}
3628	atomic_add(free, &sbi->s_mb_discarded);
3629
3630	return err;
3631}
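/*
 * Illustrative sketch (not part of this file): the bit-scanning pattern used
 * by ext4_mb_release_inode_pa() above, shown over a plain bitmap.  The
 * find_next_zero()/find_next_set() helpers stand in for mb_find_next_zero_bit()
 * and mb_find_next_bit() and are hypothetical.
 *
 *	while (bit < end) {
 *		bit = find_next_zero(bitmap, end, bit);	// next still-free block
 *		if (bit >= end)
 *			break;
 *		next = find_next_set(bitmap, end, bit);	// end of the free run
 *		freed += next - bit;			// run [bit, next)
 *		release_run(bit, next - bit);		// hand back to the buddy
 *		bit = next + 1;
 *	}
 *
 * The total "freed" is then cross-checked against pa_free; on a mismatch an
 * error is logged but the bitmap-derived value is trusted.
 */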
3632
3633static noinline_for_stack int
3634ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3635				struct ext4_prealloc_space *pa)
3636{
3637	struct super_block *sb = e4b->bd_sb;
3638	ext4_group_t group;
3639	ext4_grpblk_t bit;
3640
3641	trace_ext4_mb_release_group_pa(pa);
3642	BUG_ON(pa->pa_deleted == 0);
3643	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3644	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3645	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3646	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3647	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3648
3649	return 0;
3650}
3651
3652/*
3653 * releases all preallocations in given group
3654 *
3655 * first, we need to decide discard policy:
3656 * - when do we discard
3657 *   1) ENOSPC
3658 * - how many do we discard
3659 *   1) how many requested
3660 */
3661static noinline_for_stack int
3662ext4_mb_discard_group_preallocations(struct super_block *sb,
3663					ext4_group_t group, int needed)
3664{
3665	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3666	struct buffer_head *bitmap_bh = NULL;
3667	struct ext4_prealloc_space *pa, *tmp;
3668	struct list_head list;
3669	struct ext4_buddy e4b;
3670	int err;
3671	int busy = 0;
3672	int free = 0;
3673
3674	mb_debug(1, "discard preallocation for group %u\n", group);
3675
3676	if (list_empty(&grp->bb_prealloc_list))
3677		return 0;
3678
3679	bitmap_bh = ext4_read_block_bitmap(sb, group);
3680	if (bitmap_bh == NULL) {
3681		ext4_error(sb, "Error reading block bitmap for %u", group);
3682		return 0;
3683	}
3684
3685	err = ext4_mb_load_buddy(sb, group, &e4b);
3686	if (err) {
3687		ext4_error(sb, "Error loading buddy information for %u", group);
 
3688		put_bh(bitmap_bh);
3689		return 0;
3690	}
3691
3692	if (needed == 0)
3693		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3694
3695	INIT_LIST_HEAD(&list);
3696repeat:
3697	ext4_lock_group(sb, group);
3698	list_for_each_entry_safe(pa, tmp,
3699				&grp->bb_prealloc_list, pa_group_list) {
3700		spin_lock(&pa->pa_lock);
3701		if (atomic_read(&pa->pa_count)) {
3702			spin_unlock(&pa->pa_lock);
3703			busy = 1;
3704			continue;
3705		}
3706		if (pa->pa_deleted) {
3707			spin_unlock(&pa->pa_lock);
3708			continue;
3709		}
3710
3711		/* seems this one can be freed ... */
3712		pa->pa_deleted = 1;
3713
3714		/* we can trust pa_free ... */
3715		free += pa->pa_free;
3716
3717		spin_unlock(&pa->pa_lock);
3718
3719		list_del(&pa->pa_group_list);
3720		list_add(&pa->u.pa_tmp_list, &list);
3721	}
3722
3723	/* if we still need more blocks and some PAs were used, try again */
3724	if (free < needed && busy) {
3725		busy = 0;
3726		ext4_unlock_group(sb, group);
3727		/*
3728		 * Yield the CPU here so that we don't get a soft lockup
3729		 * in the non-preempt case.
3730		 */
3731		yield();
3732		goto repeat;
3733	}
3734
3735	/* found anything to free? */
3736	if (list_empty(&list)) {
3737		BUG_ON(free != 0);
3738		goto out;
3739	}
3740
3741	/* now free all selected PAs */
3742	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3743
3744		/* remove from object (inode or locality group) */
3745		spin_lock(pa->pa_obj_lock);
3746		list_del_rcu(&pa->pa_inode_list);
3747		spin_unlock(pa->pa_obj_lock);
3748
3749		if (pa->pa_type == MB_GROUP_PA)
3750			ext4_mb_release_group_pa(&e4b, pa);
3751		else
3752			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3753
3754		list_del(&pa->u.pa_tmp_list);
3755		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3756	}
3757
3758out:
3759	ext4_unlock_group(sb, group);
3760	ext4_mb_unload_buddy(&e4b);
3761	put_bh(bitmap_bh);
3762	return free;
3763}
3764
3765/*
3766 * releases all unused preallocated blocks for the given inode
3767 *
3768 * It's important to discard preallocations under i_data_sem.
3769 * We don't want another block to be served from the prealloc
3770 * space when we are discarding the inode prealloc space.
3771 *
3772 * FIXME!! Make sure it is valid at all the call sites
3773 */
3774void ext4_discard_preallocations(struct inode *inode)
3775{
3776	struct ext4_inode_info *ei = EXT4_I(inode);
3777	struct super_block *sb = inode->i_sb;
3778	struct buffer_head *bitmap_bh = NULL;
3779	struct ext4_prealloc_space *pa, *tmp;
3780	ext4_group_t group = 0;
3781	struct list_head list;
3782	struct ext4_buddy e4b;
3783	int err;
3784
3785	if (!S_ISREG(inode->i_mode)) {
3786		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3787		return;
3788	}
3789
3790	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3791	trace_ext4_discard_preallocations(inode);
3792
3793	INIT_LIST_HEAD(&list);
3794
3795repeat:
3796	/* first, collect all pa's in the inode */
3797	spin_lock(&ei->i_prealloc_lock);
3798	while (!list_empty(&ei->i_prealloc_list)) {
3799		pa = list_entry(ei->i_prealloc_list.next,
3800				struct ext4_prealloc_space, pa_inode_list);
3801		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3802		spin_lock(&pa->pa_lock);
3803		if (atomic_read(&pa->pa_count)) {
3804			/* this shouldn't happen often - nobody should
3805			 * use preallocation while we're discarding it */
3806			spin_unlock(&pa->pa_lock);
3807			spin_unlock(&ei->i_prealloc_lock);
3808			ext4_msg(sb, KERN_ERR,
3809				 "uh-oh! used pa while discarding");
3810			WARN_ON(1);
3811			schedule_timeout_uninterruptible(HZ);
3812			goto repeat;
3813
3814		}
3815		if (pa->pa_deleted == 0) {
3816			pa->pa_deleted = 1;
3817			spin_unlock(&pa->pa_lock);
3818			list_del_rcu(&pa->pa_inode_list);
3819			list_add(&pa->u.pa_tmp_list, &list);
 
3820			continue;
3821		}
3822
3823		/* someone is deleting pa right now */
3824		spin_unlock(&pa->pa_lock);
3825		spin_unlock(&ei->i_prealloc_lock);
3826
3827		/* we have to wait here because pa_deleted
3828		 * doesn't mean pa is already unlinked from
3829		 * the list. As we might be called from
3830		 * ->clear_inode(), the inode would get freed
3831		 * and a concurrent thread unlinking the pa
3832		 * from the inode's list could access already
3833		 * freed memory, bad-bad-bad */
3834
3835		/* XXX: if this happens too often, we can
3836		 * add a flag to force wait only in case
3837		 * of ->clear_inode(), but not in case of
3838		 * regular truncate */
3839		schedule_timeout_uninterruptible(HZ);
3840		goto repeat;
3841	}
3842	spin_unlock(&ei->i_prealloc_lock);
3843
3844	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3845		BUG_ON(pa->pa_type != MB_INODE_PA);
3846		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3847
3848		err = ext4_mb_load_buddy(sb, group, &e4b);
 
3849		if (err) {
3850			ext4_error(sb, "Error loading buddy information for %u",
3851					group);
3852			continue;
3853		}
3854
3855		bitmap_bh = ext4_read_block_bitmap(sb, group);
3856		if (bitmap_bh == NULL) {
3857			ext4_error(sb, "Error reading block bitmap for %u",
3858					group);
 
3859			ext4_mb_unload_buddy(&e4b);
3860			continue;
3861		}
3862
3863		ext4_lock_group(sb, group);
3864		list_del(&pa->pa_group_list);
3865		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3866		ext4_unlock_group(sb, group);
3867
3868		ext4_mb_unload_buddy(&e4b);
3869		put_bh(bitmap_bh);
3870
3871		list_del(&pa->u.pa_tmp_list);
3872		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3873	}
3874}
3875
3876#ifdef CONFIG_EXT4_DEBUG
3877static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3878{
3879	struct super_block *sb = ac->ac_sb;
3880	ext4_group_t ngroups, i;
3881
3882	if (!mb_enable_debug ||
3883	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3884		return;
3885
3886	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: Can't allocate:"
3887			" Allocation context details:");
3888	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: status %d flags %d",
3889			ac->ac_status, ac->ac_flags);
3890	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: orig %lu/%lu/%lu@%lu, "
3891		 	"goal %lu/%lu/%lu@%lu, "
3892			"best %lu/%lu/%lu@%lu cr %d",
3893			(unsigned long)ac->ac_o_ex.fe_group,
3894			(unsigned long)ac->ac_o_ex.fe_start,
3895			(unsigned long)ac->ac_o_ex.fe_len,
3896			(unsigned long)ac->ac_o_ex.fe_logical,
3897			(unsigned long)ac->ac_g_ex.fe_group,
3898			(unsigned long)ac->ac_g_ex.fe_start,
3899			(unsigned long)ac->ac_g_ex.fe_len,
3900			(unsigned long)ac->ac_g_ex.fe_logical,
3901			(unsigned long)ac->ac_b_ex.fe_group,
3902			(unsigned long)ac->ac_b_ex.fe_start,
3903			(unsigned long)ac->ac_b_ex.fe_len,
3904			(unsigned long)ac->ac_b_ex.fe_logical,
3905			(int)ac->ac_criteria);
3906	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: %lu scanned, %d found",
3907		 ac->ac_ex_scanned, ac->ac_found);
3908	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: groups: ");
3909	ngroups = ext4_get_groups_count(sb);
3910	for (i = 0; i < ngroups; i++) {
3911		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3912		struct ext4_prealloc_space *pa;
3913		ext4_grpblk_t start;
3914		struct list_head *cur;
3915		ext4_lock_group(sb, i);
3916		list_for_each(cur, &grp->bb_prealloc_list) {
3917			pa = list_entry(cur, struct ext4_prealloc_space,
3918					pa_group_list);
3919			spin_lock(&pa->pa_lock);
3920			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3921						     NULL, &start);
3922			spin_unlock(&pa->pa_lock);
3923			printk(KERN_ERR "PA:%u:%d:%u \n", i,
3924			       start, pa->pa_len);
3925		}
3926		ext4_unlock_group(sb, i);
3927
3928		if (grp->bb_free == 0)
3929			continue;
3930		printk(KERN_ERR "%u: %d/%d \n",
3931		       i, grp->bb_free, grp->bb_fragments);
3932	}
3933	printk(KERN_ERR "\n");
3934}
3935#else
3936static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3937{
 
3938	return;
3939}
3940#endif
3941
3942/*
3943 * We use locality group preallocation for small files. The size of the
3944 * file is the current size or the resulting size after allocation,
3945 * whichever is larger
3946 *
3947 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3948 */
3949static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3950{
3951	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3952	int bsbits = ac->ac_sb->s_blocksize_bits;
3953	loff_t size, isize;
 
3954
3955	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3956		return;
3957
3958	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3959		return;
3960
3961	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
 
 
3962	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3963		>> bsbits;
3964
3965	if ((size == isize) &&
3966	    !ext4_fs_is_busy(sbi) &&
3967	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3968		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3969		return;
3970	}
3971
3972	/* don't use group allocation for large files */
3973	size = max(size, isize);
3974	if (size > sbi->s_mb_stream_request) {
3975		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3976		return;
3977	}
3978
3979	BUG_ON(ac->ac_lg != NULL);
3980	/*
3981	 * locality group prealloc space is per-CPU. The reason for having
3982	 * a per-CPU locality group is to reduce the contention between
3983	 * block requests from multiple CPUs.
3984	 */
3985	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3986
3987	/* we're going to use group allocation */
3988	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3989
3990	/* serialize all allocations in the group */
3991	mutex_lock(&ac->ac_lg->lg_mutex);
3992}
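/*
 * Illustrative sketch (not part of this file): the policy above reduces to a
 * size comparison.  "stream_request" stands in for sbi->s_mb_stream_request
 * (mb_stream_req in sysfs); the helper name is hypothetical.
 *
 *	static int use_group_prealloc(loff_t alloc_size, loff_t cur_size,
 *				      loff_t stream_request)
 *	{
 *		loff_t size = alloc_size > cur_size ? alloc_size : cur_size;
 *
 *		return size <= stream_request;	// small file => per-CPU lg PA
 *	}
 *
 * Files above the threshold get EXT4_MB_STREAM_ALLOC instead, so their blocks
 * are allocated (and preallocated) per inode rather than per locality group.
 */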
3993
3994static noinline_for_stack int
3995ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3996				struct ext4_allocation_request *ar)
3997{
3998	struct super_block *sb = ar->inode->i_sb;
3999	struct ext4_sb_info *sbi = EXT4_SB(sb);
4000	struct ext4_super_block *es = sbi->s_es;
4001	ext4_group_t group;
4002	unsigned int len;
4003	ext4_fsblk_t goal;
4004	ext4_grpblk_t block;
4005
4006	/* we can't allocate > group size */
4007	len = ar->len;
4008
4009	/* just a dirty hack to filter too big requests  */
4010	if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4011		len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4012
4013	/* start searching from the goal */
4014	goal = ar->goal;
4015	if (goal < le32_to_cpu(es->s_first_data_block) ||
4016			goal >= ext4_blocks_count(es))
4017		goal = le32_to_cpu(es->s_first_data_block);
4018	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4019
4020	/* set up allocation goals */
4021	memset(ac, 0, sizeof(struct ext4_allocation_context));
4022	ac->ac_b_ex.fe_logical = ar->logical;
4023	ac->ac_status = AC_STATUS_CONTINUE;
4024	ac->ac_sb = sb;
4025	ac->ac_inode = ar->inode;
4026	ac->ac_o_ex.fe_logical = ar->logical;
4027	ac->ac_o_ex.fe_group = group;
4028	ac->ac_o_ex.fe_start = block;
4029	ac->ac_o_ex.fe_len = len;
4030	ac->ac_g_ex.fe_logical = ar->logical;
4031	ac->ac_g_ex.fe_group = group;
4032	ac->ac_g_ex.fe_start = block;
4033	ac->ac_g_ex.fe_len = len;
4034	ac->ac_flags = ar->flags;
4035
4036	/* we have to define the context: will we work with a file or
4037	 * a locality group. this is a policy, actually */
4038	ext4_mb_group_or_file(ac);
4039
4040	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4041			"left: %u/%u, right %u/%u to %swritable\n",
4042			(unsigned) ar->len, (unsigned) ar->logical,
4043			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4044			(unsigned) ar->lleft, (unsigned) ar->pleft,
4045			(unsigned) ar->lright, (unsigned) ar->pright,
4046			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4047	return 0;
4048
4049}
4050
4051static noinline_for_stack void
4052ext4_mb_discard_lg_preallocations(struct super_block *sb,
4053					struct ext4_locality_group *lg,
4054					int order, int total_entries)
4055{
4056	ext4_group_t group = 0;
4057	struct ext4_buddy e4b;
4058	struct list_head discard_list;
4059	struct ext4_prealloc_space *pa, *tmp;
4060
4061	mb_debug(1, "discard locality group preallocation\n");
4062
4063	INIT_LIST_HEAD(&discard_list);
4064
4065	spin_lock(&lg->lg_prealloc_lock);
4066	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4067						pa_inode_list) {
 
4068		spin_lock(&pa->pa_lock);
4069		if (atomic_read(&pa->pa_count)) {
4070			/*
4071			 * This is the pa that we just used
4072			 * for block allocation. So don't
4073			 * free that
4074			 */
4075			spin_unlock(&pa->pa_lock);
4076			continue;
4077		}
4078		if (pa->pa_deleted) {
4079			spin_unlock(&pa->pa_lock);
4080			continue;
4081		}
4082		/* only lg prealloc space */
4083		BUG_ON(pa->pa_type != MB_GROUP_PA);
4084
4085		/* seems this one can be freed ... */
4086		pa->pa_deleted = 1;
4087		spin_unlock(&pa->pa_lock);
4088
4089		list_del_rcu(&pa->pa_inode_list);
4090		list_add(&pa->u.pa_tmp_list, &discard_list);
4091
4092		total_entries--;
4093		if (total_entries <= 5) {
4094			/*
4095			 * we want to keep only 5 entries,
4096			 * allowing it to grow to 8. This
4097			 * makes sure we don't call discard
4098			 * soon for this list.
4099			 */
4100			break;
4101		}
4102	}
4103	spin_unlock(&lg->lg_prealloc_lock);
4104
4105	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
 
4106
4107		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4108		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4109			ext4_error(sb, "Error loading buddy information for %u",
4110					group);
 
 
4111			continue;
4112		}
4113		ext4_lock_group(sb, group);
4114		list_del(&pa->pa_group_list);
4115		ext4_mb_release_group_pa(&e4b, pa);
4116		ext4_unlock_group(sb, group);
4117
4118		ext4_mb_unload_buddy(&e4b);
4119		list_del(&pa->u.pa_tmp_list);
4120		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4121	}
4122}
4123
4124/*
4125 * We have incremented pa_count. So it cannot be freed at this
4126 * point. Also we hold lg_mutex. So no parallel allocation is
4127 * possible from this lg. That means pa_free cannot be updated.
4128 *
4129 * A parallel ext4_mb_discard_group_preallocations is possible,
4130 * which can cause the lg_prealloc_list to be updated.
4131 */
4132
4133static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4134{
4135	int order, added = 0, lg_prealloc_count = 1;
4136	struct super_block *sb = ac->ac_sb;
4137	struct ext4_locality_group *lg = ac->ac_lg;
4138	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4139
4140	order = fls(pa->pa_free) - 1;
4141	if (order > PREALLOC_TB_SIZE - 1)
4142		/* The max size of hash table is PREALLOC_TB_SIZE */
4143		order = PREALLOC_TB_SIZE - 1;
4144	/* Add the prealloc space to lg */
4145	rcu_read_lock();
4146	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4147						pa_inode_list) {
 
4148		spin_lock(&tmp_pa->pa_lock);
4149		if (tmp_pa->pa_deleted) {
4150			spin_unlock(&tmp_pa->pa_lock);
4151			continue;
4152		}
4153		if (!added && pa->pa_free < tmp_pa->pa_free) {
4154			/* Add to the tail of the previous entry */
4155			list_add_tail_rcu(&pa->pa_inode_list,
4156						&tmp_pa->pa_inode_list);
4157			added = 1;
4158			/*
4159			 * we want to count the total
4160			 * number of entries in the list
4161			 */
4162		}
4163		spin_unlock(&tmp_pa->pa_lock);
4164		lg_prealloc_count++;
4165	}
4166	if (!added)
4167		list_add_tail_rcu(&pa->pa_inode_list,
4168					&lg->lg_prealloc_list[order]);
4169	rcu_read_unlock();
4170
4171	/* Now trim the list to be not more than 8 elements */
4172	if (lg_prealloc_count > 8) {
4173		ext4_mb_discard_lg_preallocations(sb, lg,
4174						order, lg_prealloc_count);
4175		return;
4176	}
4177	return;
4178}
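/*
 * Illustrative sketch (not part of this file): how pa_free selects a bucket
 * of lg_prealloc_list[] above.  nr_buckets plays the role of PREALLOC_TB_SIZE;
 * the helper name is hypothetical.
 *
 *	static int lg_bucket(unsigned int pa_free, int nr_buckets)
 *	{
 *		int order = fls(pa_free) - 1;	// index of the highest set bit
 *
 *		if (order > nr_buckets - 1)
 *			order = nr_buckets - 1;	// clamp to the last bucket
 *		return order;
 *	}
 *
 * e.g. pa_free = 12 (0b1100) lands in bucket 3 and pa_free = 1 in bucket 0,
 * so PAs with similar amounts of free space end up on the same list.
 */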
4179
4180/*
4181 * release all resources we used in allocation
4182 */
4183static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4184{
4185	struct ext4_prealloc_space *pa = ac->ac_pa;
4186	if (pa) {
4187		if (pa->pa_type == MB_GROUP_PA) {
4188			/* see comment in ext4_mb_use_group_pa() */
4189			spin_lock(&pa->pa_lock);
4190			pa->pa_pstart += ac->ac_b_ex.fe_len;
4191			pa->pa_lstart += ac->ac_b_ex.fe_len;
4192			pa->pa_free -= ac->ac_b_ex.fe_len;
4193			pa->pa_len -= ac->ac_b_ex.fe_len;
4194			spin_unlock(&pa->pa_lock);
4195		}
4196	}
4197	if (pa) {
4198		/*
4199		 * We want to add the pa to the right bucket.
4200		 * Remove it from the list and while adding
4201		 * make sure the list to which we are adding
4202		 * doesn't grow big.
4203		 */
4204		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4205			spin_lock(pa->pa_obj_lock);
4206			list_del_rcu(&pa->pa_inode_list);
4207			spin_unlock(pa->pa_obj_lock);
4208			ext4_mb_add_n_trim(ac);
4209		}
 
4210		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4211	}
4212	if (ac->ac_bitmap_page)
4213		page_cache_release(ac->ac_bitmap_page);
4214	if (ac->ac_buddy_page)
4215		page_cache_release(ac->ac_buddy_page);
4216	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4217		mutex_unlock(&ac->ac_lg->lg_mutex);
4218	ext4_mb_collect_stats(ac);
 
4219	return 0;
4220}
4221
4222static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4223{
4224	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4225	int ret;
4226	int freed = 0;
 
4227
4228	trace_ext4_mb_discard_preallocations(sb, needed);
4229	for (i = 0; i < ngroups && needed > 0; i++) {
4230		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4231		freed += ret;
4232		needed -= ret;
4233	}
4234
4235	return freed;
4236}
4237
4238/*
4239 * Main entry point into mballoc to allocate blocks
4240 * it tries to use preallocation first, then falls back
4241 * to usual allocation
4242 */
4243ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4244				struct ext4_allocation_request *ar, int *errp)
4245{
4246	int freed;
4247	struct ext4_allocation_context *ac = NULL;
4248	struct ext4_sb_info *sbi;
4249	struct super_block *sb;
4250	ext4_fsblk_t block = 0;
4251	unsigned int inquota = 0;
4252	unsigned int reserv_blks = 0;
 
 
4253
 
4254	sb = ar->inode->i_sb;
4255	sbi = EXT4_SB(sb);
4256
4257	trace_ext4_request_blocks(ar);
 
 
4258
4259	/*
4260	 * For delayed allocation, we could skip the ENOSPC and
4261	 * EDQUOT check, as blocks and quota have already been
4262	 * reserved when the data was copied into the pagecache.
4263	 */
4264	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4265		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4266	else {
4267		/* Without delayed allocation we need to verify
4268		 * there are enough free blocks to do the block allocation
4269		 * and verify the allocation doesn't exceed the quota limits.
4270		 */
4271		while (ar->len &&
4272			ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
4273
4274			/* let others free the space */
4275			yield();
4276			ar->len = ar->len >> 1;
4277		}
4278		if (!ar->len) {
 
4279			*errp = -ENOSPC;
4280			return 0;
4281		}
4282		reserv_blks = ar->len;
4283		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4284			dquot_alloc_block_nofail(ar->inode, ar->len);
 
4285		} else {
4286			while (ar->len &&
4287				dquot_alloc_block(ar->inode, ar->len)) {
 
4288
4289				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4290				ar->len--;
4291			}
4292		}
4293		inquota = ar->len;
4294		if (ar->len == 0) {
4295			*errp = -EDQUOT;
4296			goto out;
4297		}
4298	}
4299
4300	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4301	if (!ac) {
4302		ar->len = 0;
4303		*errp = -ENOMEM;
4304		goto out;
4305	}
4306
4307	*errp = ext4_mb_initialize_context(ac, ar);
4308	if (*errp) {
4309		ar->len = 0;
4310		goto out;
4311	}
4312
4313	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
 
4314	if (!ext4_mb_use_preallocated(ac)) {
4315		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4316		ext4_mb_normalize_request(ac, ar);
4317repeat:
4318		/* allocate space in core */
4319		*errp = ext4_mb_regular_allocator(ac);
4320		if (*errp)
4321			goto errout;
4322
4323		/* as we've just preallocated more space than the
4324		 * user originally requested, we store the allocated
4325		 * space in a special descriptor */
4326		if (ac->ac_status == AC_STATUS_FOUND &&
4327				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4328			ext4_mb_new_preallocation(ac);
4329	}
4330	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4331		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4332		if (*errp == -EAGAIN) {
4333			/*
4334			 * drop the reference that we took
4335			 * in ext4_mb_use_best_found
4336			 */
4337			ext4_mb_release_context(ac);
4338			ac->ac_b_ex.fe_group = 0;
4339			ac->ac_b_ex.fe_start = 0;
4340			ac->ac_b_ex.fe_len = 0;
4341			ac->ac_status = AC_STATUS_CONTINUE;
4342			goto repeat;
4343		} else if (*errp)
4344		errout:
4345			ext4_discard_allocated_blocks(ac);
4346		else {
 
4347			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4348			ar->len = ac->ac_b_ex.fe_len;
4349		}
4350	} else {
4351		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4352		if (freed)
4353			goto repeat;
4354		*errp = -ENOSPC;
4355	}
4356
 
4357	if (*errp) {
4358		ac->ac_b_ex.fe_len = 0;
4359		ar->len = 0;
4360		ext4_mb_show_ac(ac);
4361	}
4362	ext4_mb_release_context(ac);
4363out:
4364	if (ac)
4365		kmem_cache_free(ext4_ac_cachep, ac);
4366	if (inquota && ar->len < inquota)
4367		dquot_free_block(ar->inode, inquota - ar->len);
4368	if (!ar->len) {
4369		if (!ext4_test_inode_state(ar->inode,
4370					   EXT4_STATE_DELALLOC_RESERVED))
4371			/* release all the reserved blocks if non delalloc */
4372			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4373						reserv_blks);
4374	}
4375
4376	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4377
4378	return block;
4379}
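/*
 * Illustrative sketch (not part of this file): the ENOSPC fallback in
 * ext4_mb_new_blocks() above, in compressed form.  If the regular allocator
 * finds nothing, group preallocations are discarded and the search is
 * retried; only when nothing could be reclaimed does the caller see -ENOSPC.
 * Helper names are hypothetical.
 *
 *	for (;;) {
 *		if (regular_allocator(ac) == AC_STATUS_FOUND)
 *			break;				// got blocks
 *		if (!discard_preallocations(sb, want))
 *			return -ENOSPC;			// nothing left to reclaim
 *	}
 */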
4380
4381/*
4382 * We can merge two free data extents only if the physical blocks
4383 * are contiguous, AND the extents were freed by the same transaction,
4384 * AND the blocks are associated with the same group.
4385 */
4386static int can_merge(struct ext4_free_data *entry1,
4387			struct ext4_free_data *entry2)
4388{
4389	if ((entry1->t_tid == entry2->t_tid) &&
4390	    (entry1->group == entry2->group) &&
4391	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
4392		return 1;
4393	return 0;
4394}
4395
4396static noinline_for_stack int
4397ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4398		      struct ext4_free_data *new_entry)
4399{
4400	ext4_group_t group = e4b->bd_group;
4401	ext4_grpblk_t block;
 
4402	struct ext4_free_data *entry;
4403	struct ext4_group_info *db = e4b->bd_info;
4404	struct super_block *sb = e4b->bd_sb;
4405	struct ext4_sb_info *sbi = EXT4_SB(sb);
4406	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4407	struct rb_node *parent = NULL, *new_node;
4408
4409	BUG_ON(!ext4_handle_valid(handle));
4410	BUG_ON(e4b->bd_bitmap_page == NULL);
4411	BUG_ON(e4b->bd_buddy_page == NULL);
4412
4413	new_node = &new_entry->node;
4414	block = new_entry->start_blk;
4415
4416	if (!*n) {
4417		/* first free block extent. We need to
4418		 * protect the buddy cache from being freed,
4419		 * otherwise we'll refresh it from the
4420		 * on-disk bitmap and lose not-yet-available
4421		 * blocks */
4422		page_cache_get(e4b->bd_buddy_page);
4423		page_cache_get(e4b->bd_bitmap_page);
4424	}
4425	while (*n) {
4426		parent = *n;
4427		entry = rb_entry(parent, struct ext4_free_data, node);
4428		if (block < entry->start_blk)
4429			n = &(*n)->rb_left;
4430		else if (block >= (entry->start_blk + entry->count))
4431			n = &(*n)->rb_right;
4432		else {
4433			ext4_grp_locked_error(sb, group, 0,
4434				ext4_group_first_block_no(sb, group) + block,
4435				"Block already on to-be-freed list");
4436			return 0;
4437		}
4438	}
4439
4440	rb_link_node(new_node, parent, n);
4441	rb_insert_color(new_node, &db->bb_free_root);
4442
4443	/* Now try to see if the extent can be merged to the left and right */
4444	node = rb_prev(new_node);
4445	if (node) {
4446		entry = rb_entry(node, struct ext4_free_data, node);
4447		if (can_merge(entry, new_entry)) {
4448			new_entry->start_blk = entry->start_blk;
4449			new_entry->count += entry->count;
4450			rb_erase(node, &(db->bb_free_root));
4451			spin_lock(&sbi->s_md_lock);
4452			list_del(&entry->list);
4453			spin_unlock(&sbi->s_md_lock);
4454			kmem_cache_free(ext4_free_ext_cachep, entry);
4455		}
4456	}
4457
4458	node = rb_next(new_node);
4459	if (node) {
4460		entry = rb_entry(node, struct ext4_free_data, node);
4461		if (can_merge(new_entry, entry)) {
4462			new_entry->count += entry->count;
4463			rb_erase(node, &(db->bb_free_root));
4464			spin_lock(&sbi->s_md_lock);
4465			list_del(&entry->list);
4466			spin_unlock(&sbi->s_md_lock);
4467			kmem_cache_free(ext4_free_ext_cachep, entry);
4468		}
4469	}
4470	/* Add the extent to transaction's private list */
4471	spin_lock(&sbi->s_md_lock);
4472	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
 
4473	spin_unlock(&sbi->s_md_lock);
4474	return 0;
4475}
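/*
 * Illustrative sketch (not part of this file): the left/right merging done
 * above, reduced to the interval arithmetic.  Entries may merge only when
 * can_merge() holds, i.e. same t_tid, same group, physically adjacent.
 *
 *	struct ext { unsigned int start, count; };
 *
 *	static void merge_left(struct ext *cur, const struct ext *prev)
 *	{
 *		if (prev->start + prev->count == cur->start) {
 *			cur->start = prev->start;	// absorb the left run
 *			cur->count += prev->count;
 *		}
 *	}
 *
 * e.g. prev = {100, 8} and cur = {108, 4} merge into {100, 12}; the absorbed
 * rb-tree node is then erased and its entry freed, as in the code above.
 */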
4476
4477/**
4478 * ext4_free_blocks() -- Free given blocks and update quota
4479 * @handle:		handle for this transaction
4480 * @inode:		inode
4481 * @block:		start physical block to free
4482 * @count:		number of blocks to free
4483 * @flags:		flags used by ext4_free_blocks
4484 */
4485void ext4_free_blocks(handle_t *handle, struct inode *inode,
4486		      struct buffer_head *bh, ext4_fsblk_t block,
4487		      unsigned long count, int flags)
4488{
4489	struct buffer_head *bitmap_bh = NULL;
4490	struct super_block *sb = inode->i_sb;
4491	struct ext4_group_desc *gdp;
4492	unsigned long freed = 0;
4493	unsigned int overflow;
4494	ext4_grpblk_t bit;
4495	struct buffer_head *gd_bh;
4496	ext4_group_t block_group;
4497	struct ext4_sb_info *sbi;
4498	struct ext4_buddy e4b;
 
4499	int err = 0;
4500	int ret;
4501
4502	if (bh) {
4503		if (block)
4504			BUG_ON(block != bh->b_blocknr);
4505		else
4506			block = bh->b_blocknr;
4507	}
4508
4509	sbi = EXT4_SB(sb);
 
4510	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4511	    !ext4_data_block_valid(sbi, block, count)) {
4512		ext4_error(sb, "Freeing blocks not in datazone - "
4513			   "block = %llu, count = %lu", block, count);
 
4514		goto error_return;
4515	}
4516
4517	ext4_debug("freeing block %llu\n", block);
4518	trace_ext4_free_blocks(inode, block, count, flags);
4519
4520	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4521		struct buffer_head *tbh = bh;
4522		int i;
4523
4524		BUG_ON(bh && (count > 1));
4525
4526		for (i = 0; i < count; i++) {
4527			if (!bh)
4528				tbh = sb_find_get_block(inode->i_sb,
4529							block + i);
4530			if (unlikely(!tbh))
4531				continue;
4532			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4533				    inode, tbh, block + i);
4534		}
4535	}
4536
4537	/*
4538	 * We need to make sure we don't reuse the freed block until
4539	 * after the transaction is committed, which we can do by
4540	 * treating the block as metadata, below.  We make an
4541	 * exception if the inode is to be written in writeback mode
4542	 * since writeback mode has weak data consistency guarantees.
4543	 */
4544	if (!ext4_should_writeback_data(inode))
4545		flags |= EXT4_FREE_BLOCKS_METADATA;
4546
4547do_more:
4548	overflow = 0;
4549	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4550
4551	/*
4552	 * Check to see if we are freeing blocks across a group
4553	 * boundary.
4554	 */
4555	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4556		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4557		count -= overflow;
4558	}
4559	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4560	if (!bitmap_bh) {
4561		err = -EIO;
 
4562		goto error_return;
4563	}
4564	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4565	if (!gdp) {
4566		err = -EIO;
4567		goto error_return;
4568	}
4569
4570	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4571	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4572	    in_range(block, ext4_inode_table(sb, gdp),
4573		      EXT4_SB(sb)->s_itb_per_group) ||
4574	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4575		      EXT4_SB(sb)->s_itb_per_group)) {
4576
4577		ext4_error(sb, "Freeing blocks in system zone - "
4578			   "Block = %llu, count = %lu", block, count);
4579		/* err = 0. ext4_std_error should be a no op */
4580		goto error_return;
4581	}
4582
4583	BUFFER_TRACE(bitmap_bh, "getting write access");
4584	err = ext4_journal_get_write_access(handle, bitmap_bh);
 
4585	if (err)
4586		goto error_return;
4587
4588	/*
4589	 * We are about to modify some metadata.  Call the journal APIs
4590	 * to unshare ->b_data if a currently-committing transaction is
4591	 * using it
4592	 */
4593	BUFFER_TRACE(gd_bh, "get_write_access");
4594	err = ext4_journal_get_write_access(handle, gd_bh);
4595	if (err)
4596		goto error_return;
4597#ifdef AGGRESSIVE_CHECK
4598	{
4599		int i;
4600		for (i = 0; i < count; i++)
4601			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4602	}
4603#endif
4604	trace_ext4_mballoc_free(sb, inode, block_group, bit, count);
4605
4606	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4607	if (err)
4608		goto error_return;
4609
4610	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4611		struct ext4_free_data *new_entry;
4612		/*
4613		 * blocks being freed are metadata. these blocks shouldn't
4614		 * be used until this transaction is committed
4615		 */
4616		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4617		if (!new_entry) {
4618			err = -ENOMEM;
4619			goto error_return;
4620		}
4621		new_entry->start_blk = bit;
4622		new_entry->group  = block_group;
4623		new_entry->count = count;
4624		new_entry->t_tid = handle->h_transaction->t_tid;
4625
4626		ext4_lock_group(sb, block_group);
4627		mb_clear_bits(bitmap_bh->b_data, bit, count);
4628		ext4_mb_free_metadata(handle, &e4b, new_entry);
4629	} else {
4630		/* need to update group_info->bb_free and bitmap
4631		 * with the group lock held. generate_buddy looks at
4632		 * them with the group lock held
4633		 */
4634		ext4_lock_group(sb, block_group);
4635		mb_clear_bits(bitmap_bh->b_data, bit, count);
4636		mb_free_blocks(inode, &e4b, bit, count);
4637	}
4638
4639	ret = ext4_free_blks_count(sb, gdp) + count;
4640	ext4_free_blks_set(sb, gdp, ret);
4641	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
 
4642	ext4_unlock_group(sb, block_group);
4643	percpu_counter_add(&sbi->s_freeblocks_counter, count);
4644
4645	if (sbi->s_log_groups_per_flex) {
4646		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4647		atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
 
 
4648	}
4649
4650	ext4_mb_unload_buddy(&e4b);
4651
4652	freed += count;
4653
4654	/* We dirtied the bitmap block */
4655	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4656	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4657
4658	/* And the group descriptor block */
4659	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4660	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4661	if (!err)
4662		err = ret;
4663
4664	if (overflow && !err) {
4665		block += count;
4666		count = overflow;
4667		put_bh(bitmap_bh);
 
 
4668		goto do_more;
4669	}
4670	ext4_mark_super_dirty(sb);
4671error_return:
4672	if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4673		dquot_free_block(inode, freed);
4674	brelse(bitmap_bh);
4675	ext4_std_error(sb, err);
4676	return;
4677}
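/*
 * Illustrative sketch (not part of this file): the do_more loop above splits
 * a free request at a group boundary.  The same arithmetic as stand-alone
 * code, with blocks_per_group standing in for EXT4_BLOCKS_PER_GROUP() and
 * free_in_group() as a hypothetical stand-in for the per-group bitmap/buddy
 * updates performed by the function above.
 *
 *	while (count) {
 *		unsigned long bit = block % blocks_per_group;
 *		unsigned long chunk = count;
 *
 *		if (bit + count > blocks_per_group)
 *			chunk = blocks_per_group - bit;	// stop at the boundary
 *		free_in_group(block / blocks_per_group, bit, chunk);
 *		block += chunk;				// continue in the next
 *		count -= chunk;				// group, if any is left
 *	}
 */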
4678
4679/**
4680 * ext4_group_add_blocks() -- Add given blocks to an existing group
4681 * @handle:			handle to this transaction
4682 * @sb:				super block
4683 * @block:			start physical block to add to the block group
4684 * @count:			number of blocks to add
4685 *
4686 * This marks the blocks as free in the bitmap and buddy.
4687 */
4688int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4689			 ext4_fsblk_t block, unsigned long count)
4690{
4691	struct buffer_head *bitmap_bh = NULL;
4692	struct buffer_head *gd_bh;
4693	ext4_group_t block_group;
4694	ext4_grpblk_t bit;
4695	unsigned int i;
4696	struct ext4_group_desc *desc;
4697	struct ext4_sb_info *sbi = EXT4_SB(sb);
4698	struct ext4_buddy e4b;
4699	int err = 0, ret, blk_free_count;
4700	ext4_grpblk_t blocks_freed;
4701
4702	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4703
4704	if (count == 0)
4705		return 0;
4706
4707	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4708	/*
4709	 * Check to see if we are adding blocks across a group
4710	 * boundary.
4711	 */
4712	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4713		ext4_warning(sb, "too many blocks added to group %u\n",
4714			     block_group);
4715		err = -EINVAL;
4716		goto error_return;
4717	}
4718
4719	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4720	if (!bitmap_bh) {
4721		err = -EIO;
 
4722		goto error_return;
4723	}
4724
4725	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4726	if (!desc) {
4727		err = -EIO;
4728		goto error_return;
4729	}
4730
4731	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4732	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4733	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4734	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4735		     sbi->s_itb_per_group)) {
4736		ext4_error(sb, "Adding blocks in system zones - "
4737			   "Block = %llu, count = %lu",
4738			   block, count);
4739		err = -EINVAL;
4740		goto error_return;
4741	}
4742
4743	BUFFER_TRACE(bitmap_bh, "getting write access");
4744	err = ext4_journal_get_write_access(handle, bitmap_bh);
 
4745	if (err)
4746		goto error_return;
4747
4748	/*
4749	 * We are about to modify some metadata.  Call the journal APIs
4750	 * to unshare ->b_data if a currently-committing transaction is
4751	 * using it
4752	 */
4753	BUFFER_TRACE(gd_bh, "get_write_access");
4754	err = ext4_journal_get_write_access(handle, gd_bh);
4755	if (err)
4756		goto error_return;
4757
4758	for (i = 0, blocks_freed = 0; i < count; i++) {
4759		BUFFER_TRACE(bitmap_bh, "clear bit");
4760		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4761			ext4_error(sb, "bit already cleared for block %llu",
4762				   (ext4_fsblk_t)(block + i));
4763			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4764		} else {
4765			blocks_freed++;
4766		}
4767	}
4768
4769	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4770	if (err)
4771		goto error_return;
4772
4773	/*
4774	 * need to update group_info->bb_free and bitmap
4775	 * with the group lock held. generate_buddy looks at
4776	 * them with the group lock held
4777	 */
4778	ext4_lock_group(sb, block_group);
4779	mb_clear_bits(bitmap_bh->b_data, bit, count);
4780	mb_free_blocks(NULL, &e4b, bit, count);
4781	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
4782	ext4_free_blks_set(sb, desc, blk_free_count);
4783	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
 
 
4784	ext4_unlock_group(sb, block_group);
4785	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
 
4786
4787	if (sbi->s_log_groups_per_flex) {
4788		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4789		atomic_add(blocks_freed,
4790			   &sbi->s_flex_groups[flex_group].free_blocks);
 
4791	}
4792
4793	ext4_mb_unload_buddy(&e4b);
4794
4795	/* We dirtied the bitmap block */
4796	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4797	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4798
4799	/* And the group descriptor block */
4800	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4801	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4802	if (!err)
4803		err = ret;
4804
4805error_return:
4806	brelse(bitmap_bh);
4807	ext4_std_error(sb, err);
4808	return err;
4809}
4810
4811/**
4812 * ext4_trim_extent -- function to TRIM one single free extent in the group
4813 * @sb:		super block for the file system
4814 * @start:	starting block of the free extent in the alloc. group
4815 * @count:	number of blocks to TRIM
4816 * @group:	alloc. group we are working with
4817 * @e4b:	ext4 buddy for the group
4818 *
4819 * Trim "count" blocks starting at "start" in the "group". To ensure that no
4820 * one will allocate those blocks, mark them as used in the buddy bitmap. This
4821 * must be called under the group lock.
4822 */
4823static void ext4_trim_extent(struct super_block *sb, int start, int count,
4824			     ext4_group_t group, struct ext4_buddy *e4b)
4825{
4826	struct ext4_free_extent ex;
 
 
4827
4828	trace_ext4_trim_extent(sb, group, start, count);
4829
4830	assert_spin_locked(ext4_group_lock_ptr(sb, group));
4831
4832	ex.fe_start = start;
4833	ex.fe_group = group;
4834	ex.fe_len = count;
4835
4836	/*
4837	 * Mark blocks used, so no one can reuse them while
4838	 * being trimmed.
4839	 */
4840	mb_mark_used(e4b, &ex);
4841	ext4_unlock_group(sb, group);
4842	ext4_issue_discard(sb, group, start, count);
4843	ext4_lock_group(sb, group);
4844	mb_free_blocks(NULL, e4b, start, ex.fe_len);
4845}
4846
4847/**
4848 * ext4_trim_all_free -- function to trim all free space in alloc. group
4849 * @sb:			super block for file system
4850 * @group:		group to be trimmed
4851 * @start:		first group block to examine
4852 * @max:		last group block to examine
4853 * @minblocks:		minimum extent block count
4854 *
4855 * ext4_trim_all_free walks through the group's buddy bitmap searching for
4856 * free extents. When a free extent is found, ext4_trim_extent is called to
4857 * TRIM it: the extent is marked as used in the group buddy bitmap, a TRIM
4858 * command is issued on it, and the extent is then freed again in the buddy
4859 * bitmap. This is done until the whole group is scanned.
4864 */
4865static ext4_grpblk_t
4866ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4867		   ext4_grpblk_t start, ext4_grpblk_t max,
4868		   ext4_grpblk_t minblocks)
4869{
4870	void *bitmap;
4871	ext4_grpblk_t next, count = 0, free_count = 0;
4872	struct ext4_buddy e4b;
4873	int ret;
4874
4875	trace_ext4_trim_all_free(sb, group, start, max);
4876
4877	ret = ext4_mb_load_buddy(sb, group, &e4b);
4878	if (ret) {
4879		ext4_error(sb, "Error in loading buddy "
4880				"information for %u", group);
4881		return ret;
4882	}
4883	bitmap = e4b.bd_bitmap;
4884
4885	ext4_lock_group(sb, group);
4886	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4887	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4888		goto out;
4889
4890	start = (e4b.bd_info->bb_first_free > start) ?
4891		e4b.bd_info->bb_first_free : start;
4892
4893	while (start < max) {
4894		start = mb_find_next_zero_bit(bitmap, max, start);
4895		if (start >= max)
4896			break;
4897		next = mb_find_next_bit(bitmap, max, start);
4898
4899		if ((next - start) >= minblocks) {
4900			ext4_trim_extent(sb, start,
4901					 next - start, group, &e4b);
4902			count += next - start;
4903		}
4904		free_count += next - start;
4905		start = next + 1;
4906
4907		if (fatal_signal_pending(current)) {
4908			count = -ERESTARTSYS;
4909			break;
4910		}
4911
4912		if (need_resched()) {
4913			ext4_unlock_group(sb, group);
4914			cond_resched();
4915			ext4_lock_group(sb, group);
4916		}
4917
4918		if ((e4b.bd_info->bb_free - free_count) < minblocks)
4919			break;
4920	}
4921
4922	if (!ret)
4923		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4924out:
4925	ext4_unlock_group(sb, group);
4926	ext4_mb_unload_buddy(&e4b);
4927
4928	ext4_debug("trimmed %d blocks in the group %d\n",
4929		count, group);
4930
4931	return count;
4932}
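/*
 * Illustrative sketch (not part of this file): the core of the scan above.
 * Only runs of free blocks at least minblocks long are trimmed; shorter runs
 * are counted but left alone.  Helper names are hypothetical.
 *
 *	while (start < max) {
 *		start = find_next_zero(bitmap, max, start);	// free run start
 *		if (start >= max)
 *			break;
 *		next = find_next_set(bitmap, max, start);	// free run end
 *		if (next - start >= minblocks)
 *			trim_extent(start, next - start);	// mark, TRIM, free
 *		free_count += next - start;
 *		start = next + 1;
 *		if (bb_free - free_count < minblocks)
 *			break;					// too little left
 *	}
 */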
4933
4934/**
4935 * ext4_trim_fs() -- trim ioctl handle function
4936 * @sb:			superblock for filesystem
4937 * @range:		fstrim_range structure
4938 *
4939 * start:	First Byte to trim
4940 * start:	first byte to trim
4941 * len:		number of bytes to trim from start
4942 * minlen:	minimum extent length in bytes
4943 * ext4_trim_fs goes through all allocation groups containing bytes from
4944 * start to start+len. For each such group the ext4_trim_all_free function
4945 * is invoked to trim all free space.
4946int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4947{
 
4948	struct ext4_group_info *grp;
4949	ext4_group_t first_group, last_group;
4950	ext4_group_t group, ngroups = ext4_get_groups_count(sb);
4951	ext4_grpblk_t cnt = 0, first_block, last_block;
4952	uint64_t start, len, minlen, trimmed = 0;
4953	ext4_fsblk_t first_data_blk =
4954			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 
 
4955	int ret = 0;
4956
4957	start = range->start >> sb->s_blocksize_bits;
4958	len = range->len >> sb->s_blocksize_bits;
4959	minlen = range->minlen >> sb->s_blocksize_bits;
4960
4961	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
4962		return -EINVAL;
4963	if (start + len <= first_data_blk)
4964		goto out;
4965	if (start < first_data_blk) {
4966		len -= first_data_blk - start;
4967		start = first_data_blk;
4968	}
4969
4970	/* Determine first and last group to examine based on start and len */
4971	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
4972				     &first_group, &first_block);
4973	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) (start + len),
4974				     &last_group, &last_block);
4975	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
4976	last_block = EXT4_BLOCKS_PER_GROUP(sb);
4977
4978	if (first_group > last_group)
4979		return -EINVAL;
4980
4981	for (group = first_group; group <= last_group; group++) {
4982		grp = ext4_get_group_info(sb, group);
4983		/* We only do this if the grp has never been initialized */
4984		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
4985			ret = ext4_mb_init_group(sb, group);
4986			if (ret)
4987				break;
4988		}
4989
4990		/*
4991		 * For all the groups except the last one, last block will
4992		 * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to
4993		 * change it for the last group in which case start +
4994		 * len < EXT4_BLOCKS_PER_GROUP(sb).
4995		 */
4996		if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb))
4997			last_block = first_block + len;
4998		len -= last_block - first_block;
4999
5000		if (grp->bb_free >= minlen) {
5001			cnt = ext4_trim_all_free(sb, group, first_block,
5002						last_block, minlen);
5003			if (cnt < 0) {
5004				ret = cnt;
5005				break;
5006			}
 
5007		}
5008		trimmed += cnt;
5009		first_block = 0;
5010	}
5011	range->len = trimmed * sb->s_blocksize;
5012
5013	if (!ret)
5014		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5015
5016out:
 
5017	return ret;
5018}
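/*
 * Illustrative sketch (not part of this file): how the byte-based
 * fstrim_range maps onto groups for the loop above.  blocksize_bits and
 * blocks_per_group stand in for the superblock fields; names are
 * hypothetical, and ext4_get_group_no_and_offset() additionally subtracts
 * s_first_data_block before dividing.
 *
 *	start_blk = range->start  >> blocksize_bits;
 *	len_blks  = range->len    >> blocksize_bits;
 *	minlen    = range->minlen >> blocksize_bits;
 *
 *	first_group = start_blk / blocks_per_group;
 *	first_off   = start_blk % blocks_per_group;
 *	last_group  = (start_blk + len_blks) / blocks_per_group;
 *
 * Every group in [first_group, last_group] is then handed to
 * ext4_trim_all_free(), starting at first_off in the first group and at
 * block 0 in the following ones.
 */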
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
   4 * Written by Alex Tomas <alex@clusterfs.com>
 
 
 
 
 
 
 
 
 
 
 
 
 
   5 */
   6
   7
   8/*
   9 * mballoc.c contains the multiblocks allocation routines
  10 */
  11
  12#include "ext4_jbd2.h"
  13#include "mballoc.h"
  14#include <linux/log2.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/nospec.h>
  18#include <linux/backing-dev.h>
  19#include <trace/events/ext4.h>
  20
  21/*
  22 * MUSTDO:
  23 *   - test ext4_ext_search_left() and ext4_ext_search_right()
  24 *   - search for metadata in few groups
  25 *
  26 * TODO v4:
  27 *   - normalization should take into account whether file is still open
  28 *   - discard preallocations if no free space left (policy?)
  29 *   - don't normalize tails
  30 *   - quota
  31 *   - reservation for superuser
  32 *
  33 * TODO v3:
  34 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
  35 *   - track min/max extents in each group for better group selection
  36 *   - mb_mark_used() may allocate chunk right after splitting buddy
  37 *   - tree of groups sorted by number of free blocks
  38 *   - error handling
  39 */
  40
  41/*
  42 * The allocation request involve request for multiple number of blocks
  43 * near to the goal(block) value specified.
  44 *
  45 * During initialization phase of the allocator we decide to use the
  46 * group preallocation or inode preallocation depending on the size of
  47 * the file. The size of the file could be the resulting file size we
  48 * would have after allocation, or the current file size, which ever
  49 * is larger. If the size is less than sbi->s_mb_stream_request we
  50 * select to use the group preallocation. The default value of
  51 * s_mb_stream_request is 16 blocks. This can also be tuned via
  52 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
  53 * terms of number of blocks.
  54 *
  55 * The main motivation for having small file use group preallocation is to
  56 * ensure that we have small files closer together on the disk.
  57 *
  58 * First stage the allocator looks at the inode prealloc list,
  59 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
  60 * spaces for this particular inode. The inode prealloc space is
  61 * represented as:
  62 *
  63 * pa_lstart -> the logical start block for this prealloc space
  64 * pa_pstart -> the physical start block for this prealloc space
  65 * pa_len    -> length for this prealloc space (in clusters)
  66 * pa_free   ->  free space available in this prealloc space (in clusters)
  67 *
  68 * The inode preallocation space is used looking at the _logical_ start
  69 * block. If only the logical file block falls within the range of prealloc
  70 * space we will consume the particular prealloc space. This makes sure that
  71 * we have contiguous physical blocks representing the file blocks
  72 *
  73 * The important thing to be noted in case of inode prealloc space is that
  74 * we don't modify the values associated to inode prealloc space except
  75 * pa_free.
  76 *
  77 * If we are not able to find blocks in the inode prealloc space and if we
  78 * have the group allocation flag set then we look at the locality group
  79 * prealloc space. These are per CPU prealloc list represented as
  80 *
  81 * ext4_sb_info.s_locality_groups[smp_processor_id()]
  82 *
  83 * The reason for having a per cpu locality group is to reduce the contention
  84 * between CPUs. It is possible to get scheduled at this point.
  85 *
  86 * The locality group prealloc space is used looking at whether we have
  87 * enough free space (pa_free) within the prealloc space.
  88 *
  89 * If we can't allocate blocks via inode prealloc or/and locality group
  90 * prealloc then we look at the buddy cache. The buddy cache is represented
  91 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
  92 * mapped to the buddy and bitmap information regarding different
  93 * groups. The buddy information is attached to buddy cache inode so that
  94 * we can access them through the page cache. The information regarding
  95 * each group is loaded via ext4_mb_load_buddy.  The information involve
  96 * block bitmap and buddy information. The information are stored in the
  97 * inode as:
  98 *
  99 *  {                        page                        }
 100 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 101 *
 102 *
 103 * one block each for bitmap and buddy information.  So for each group we
 104 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 105 * blocksize) blocks.  So it can have information regarding groups_per_page
 106 * which is blocks_per_page/2
 107 *
 108 * The buddy cache inode is not stored on disk. The inode is thrown
 109 * away when the filesystem is unmounted.
 110 *
 111 * We look for count number of blocks in the buddy cache. If we were able
 112 * to locate that many free blocks we return with additional information
 113 * regarding rest of the contiguous physical block available
 114 *
 115 * Before allocating blocks via buddy cache we normalize the request
 116 * blocks. This ensure we ask for more blocks that we needed. The extra
 117 * blocks that we get after allocation is added to the respective prealloc
 118 * list. In case of inode preallocation we follow a list of heuristics
 119 * based on file size. This can be found in ext4_mb_normalize_request. If
 120 * we are doing a group prealloc we try to normalize the request to
 121 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 122 * dependent on the cluster size; for non-bigalloc file systems, it is
 123 * 512 blocks. This can be tuned via
 124 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 125 * terms of number of blocks. If we have mounted the file system with -O
 126 * stripe=<value> option the group prealloc request is normalized to the
 127 * smallest multiple of the stripe value (sbi->s_stripe) which is
 128 * greater than the default mb_group_prealloc.
 129 *
 130 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 131 * structures in two data structures:
 132 *
 133 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 134 *
 135 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 136 *
 137 *    This is an array of lists where the index in the array represents the
 138 *    largest free order in the buddy bitmap of the participating group infos of
 139 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 140 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 141 *    placed in appropriate lists.
 142 *
 143 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 144 *
 145 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 146 *
 147 *    This is an array of lists where in the i-th list there are groups with
 148 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 149 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 150 *    Note that we don't bother with a special list for completely empty groups
 151 *    so we only have MB_NUM_ORDERS(sb) lists.
 152 *
 153 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 154 * structures to decide the order in which groups are to be traversed for
 155 * fulfilling an allocation request.
 156 *
 157 * At CR = 0, we look for groups which have the largest_free_order >= the order
 158 * of the request. We directly look at the largest free order list in the data
 159 * structure (1) above where largest_free_order = order of the request. If that
 160 * list is empty, we look at the remaining lists in increasing order of
 161 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
 162 *
 163 * At CR = 1, we only consider groups where average fragment size > request
 164 * size. So, we look up a group whose average fragment size is just above or
 165 * equal to request size using our average fragment size group lists (data
 166 * structure 2) in O(1) time.
 167 *
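 * As a rough example of the index math (illustrative only, not a statement
 * about any particular filesystem): a request for 64 clusters has order 6,
 * so at CR = 0 the scan starts from the largest_free_order list at index 6;
 * at CR = 1 the request is mapped to the average-fragment-size list whose
 * range covers 64.
 *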
 168 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 169 * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
 170 *
 171 * The regular allocator (using the buddy cache) supports a few tunables.
 172 *
 173 * /sys/fs/ext4/<partition>/mb_min_to_scan
 174 * /sys/fs/ext4/<partition>/mb_max_to_scan
 175 * /sys/fs/ext4/<partition>/mb_order2_req
 176 * /sys/fs/ext4/<partition>/mb_linear_limit
 177 *
 178 * The regular allocator uses buddy scan only if the request len is a power of
 179 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 180 * value of s_mb_order2_reqs can be tuned via
 181 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 182 * stripe size (sbi->s_stripe), we try to search for contiguous blocks of
 183 * stripe size. This should result in better allocation on RAID setups. If
 184 * not, we search in the specific group using the bitmap for best extents. The
 185 * tunables min_to_scan and max_to_scan control the behaviour here.
 186 * min_to_scan indicates how long mballoc __must__ look for a best
 187 * extent and max_to_scan indicates how long mballoc __can__ look for a
 188 * best extent in the found extents. Searching for the blocks starts with
 189 * the group specified as the goal value in allocation context via
 190 * ac_g_ex. Each group is first checked based on the criteria whether it
 191 * can be used for allocation. ext4_mb_good_group explains how the groups are
 192 * checked.
 193 *
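 * For instance (illustrative only): a 16-block request is a power of two of
 * order 4, so as long as mb_order2_req <= 4 it is eligible for the buddy
 * scan; a 12-block request is not a power of two and is served by the
 * bitmap-based scan bounded by mb_min_to_scan/mb_max_to_scan.
 *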
 194 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 195 * get traversed linearly. That may result in subsequent allocations not being
 196 * close to each other. And so, the underlying device may get filled up in a
 197 * non-linear fashion. While that may not matter on non-rotational devices, for
 198 * rotational devices that may result in higher seek times. "mb_linear_limit"
 199 * tells mballoc how many groups it should search linearly before
 200 * consulting the above data structures for more efficient lookups. For
 201 * non-rotational devices, this value defaults to 0 and for rotational devices
 202 * it is set to MB_DEFAULT_LINEAR_LIMIT.
 203 *
 204 * Both prealloc spaces are populated as described above. So for the first
 205 * request we will hit the buddy cache which will result in this prealloc
 206 * space getting filled. The prealloc space is then later used for the
 207 * subsequent request.
 208 */
 209
 210/*
 211 * mballoc operates on the following data:
 212 *  - on-disk bitmap
 213 *  - in-core buddy (actually includes buddy and bitmap)
 214 *  - preallocation descriptors (PAs)
 215 *
 216 * there are two types of preallocations:
 217 *  - inode
 218 *    assigned to a specific inode and can be used for this inode only.
 219 *    it describes part of the inode's space preallocated to specific
 220 *    physical blocks. any block from that preallocation can be used
 221 *    independently. the descriptor just tracks the number of blocks left
 222 *    unused. so, before taking some block from the descriptor, one must
 223 *    make sure the corresponding logical block isn't allocated yet. this
 224 *    also means that freeing any block within descriptor's range
 225 *    must discard all preallocated blocks.
 226 *  - locality group
 227 *    assigned to a specific locality group which does not translate to
 228 *    a permanent set of inodes: an inode can join and leave the group. space
 229 *    from this type of preallocation can be used for any inode. thus
 230 *    it's consumed from the beginning to the end.
 231 *
 232 * relation between them can be expressed as:
 233 *    in-core buddy = on-disk bitmap + preallocation descriptors
 234 *
 235 * this means the blocks mballoc considers used are:
 236 *  - allocated blocks (persistent)
 237 *  - preallocated blocks (non-persistent)
 238 *
 239 * consistency in mballoc world means that at any time a block is either
 240 * free or used in ALL structures. notice: "any time" should not be read
 241 * literally -- time is discrete and delimited by locks.
 242 *
 243 *  to keep it simple, we don't use block numbers, instead we count number of
 244 *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 245 *
 246 * all operations can be expressed as:
 247 *  - init buddy:			buddy = on-disk + PAs
 248 *  - new PA:				buddy += N; PA = N
 249 *  - use inode PA:			on-disk += N; PA -= N
 250 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 251 *  - use locality group PA		on-disk += N; PA -= N
 252 *  - discard locality group PA		buddy -= PA; PA = 0
 253 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 254 *        is used in real operation because we can't know actual used
 255 *        bits from PA, only from on-disk bitmap
 256 *
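 * A small worked sequence (numbers are illustrative): creating a new PA of
 * 8 blocks marks 8 blocks used in the buddy and sets PA = 8; writing 3 of
 * them marks 3 blocks in the on-disk bitmap and leaves PA = 5; discarding
 * the PA then returns the 5 still-unused blocks to the buddy and sets
 * PA = 0.
 *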
 257 * if we follow this strict logic, then all operations above should be atomic.
 258 * given some of them can block, we'd have to use something like semaphores
 259 * killing performance on high-end SMP hardware. let's try to relax it using
 260 * the following knowledge:
 261 *  1) if buddy is referenced, it's already initialized
 262 *  2) while block is used in buddy and the buddy is referenced,
 263 *     nobody can re-allocate that block
 264 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 265 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 266 *     on-disk bitmap if buddy has same bit set and/or PA covers the corresponding
 267 *     block
 268 *
 269 * so, now we're building a concurrency table:
 270 *  - init buddy vs.
 271 *    - new PA
 272 *      blocks for PA are allocated in the buddy, buddy must be referenced
 273 *      until PA is linked to allocation group to avoid concurrent buddy init
 274 *    - use inode PA
 275 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 276 *      given (3) we care that PA-=N operation doesn't interfere with init
 277 *    - discard inode PA
 278 *      the simplest way would be to have buddy initialized by the discard
 279 *    - use locality group PA
 280 *      again PA-=N must be serialized with init
 281 *    - discard locality group PA
 282 *      the simplest way would be to have buddy initialized by the discard
 283 *  - new PA vs.
 284 *    - use inode PA
 285 *      i_data_sem serializes them
 286 *    - discard inode PA
 287 *      discard process must wait until PA isn't used by another process
 288 *    - use locality group PA
 289 *      some mutex should serialize them
 290 *    - discard locality group PA
 291 *      discard process must wait until PA isn't used by another process
 292 *  - use inode PA
 293 *    - use inode PA
 294 *      i_data_sem or another mutex should serialize them
 295 *    - discard inode PA
 296 *      discard process must wait until PA isn't used by another process
 297 *    - use locality group PA
 298 *      nothing wrong here -- they're different PAs covering different blocks
 299 *    - discard locality group PA
 300 *      discard process must wait until PA isn't used by another process
 301 *
 302 * now we're ready to draw a few conclusions:
 303 *  - while a PA is referenced, no discard is possible
 304 *  - a PA is kept referenced until its blocks are marked in the on-disk bitmap
 305 *  - PA changes only after on-disk bitmap
 306 *  - discard must not compete with init. either init is done before
 307 *    any discard or they're serialized somehow
 308 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 309 *
 310 * a special case is when we've consumed a PA entirely. no need to modify buddy
 311 * in this case, but we should still care about concurrent init
 312 *
 313 */
 314
 315 /*
 316 * Logic in a few words:
 317 *
 318 *  - allocation:
 319 *    load group
 320 *    find blocks
 321 *    mark bits in on-disk bitmap
 322 *    release group
 323 *
 324 *  - use preallocation:
 325 *    find proper PA (per-inode or group)
 326 *    load group
 327 *    mark bits in on-disk bitmap
 328 *    release group
 329 *    release PA
 330 *
 331 *  - free:
 332 *    load group
 333 *    mark bits in on-disk bitmap
 334 *    release group
 335 *
 336 *  - discard preallocations in group:
 337 *    mark PAs deleted
 338 *    move them onto local list
 339 *    load on-disk bitmap
 340 *    load group
 341 *    remove PA from object (inode or locality group)
 342 *    mark free blocks in-core
 343 *
 344 *  - discard inode's preallocations:
 345 */
 346
 347/*
 348 * Locking rules
 349 *
 350 * Locks:
 351 *  - bitlock on a group	(group)
 352 *  - object (inode/locality)	(object)
 353 *  - per-pa lock		(pa)
 354 *  - cr0 lists lock		(cr0)
 355 *  - cr1 tree lock		(cr1)
 356 *
 357 * Paths:
 358 *  - new pa
 359 *    object
 360 *    group
 361 *
 362 *  - find and use pa:
 363 *    pa
 364 *
 365 *  - release consumed pa:
 366 *    pa
 367 *    group
 368 *    object
 369 *
 370 *  - generate in-core bitmap:
 371 *    group
 372 *        pa
 373 *
 374 *  - discard all for given object (inode, locality group):
 375 *    object
 376 *        pa
 377 *    group
 378 *
 379 *  - discard all for given group:
 380 *    group
 381 *        pa
 382 *    group
 383 *        object
 384 *
 385 *  - allocation path (ext4_mb_regular_allocator)
 386 *    group
 387 *    cr0/cr1
 388 */
 389static struct kmem_cache *ext4_pspace_cachep;
 390static struct kmem_cache *ext4_ac_cachep;
 391static struct kmem_cache *ext4_free_data_cachep;
 392
 393/* We create slab caches for groupinfo data structures based on the
 394 * superblock block size.  There will be one per mounted filesystem for
 395 * each unique s_blocksize_bits */
 396#define NR_GRPINFO_CACHES 8
 397static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 398
 399static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
 400	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
 401	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
 402	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
 403};
 404
 405static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 406					ext4_group_t group);
 407static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 408						ext4_group_t group);
 409static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
 410
 411static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 412			       ext4_group_t group, int cr);
 413
 414static int ext4_try_to_trim_range(struct super_block *sb,
 415		struct ext4_buddy *e4b, ext4_grpblk_t start,
 416		ext4_grpblk_t max, ext4_grpblk_t minblocks);
 417
 418/*
 419 * The algorithm using this percpu seq counter goes as follows:
 420 * 1. We sample the percpu discard_pa_seq counter before trying for block
 421 *    allocation in ext4_mb_new_blocks().
 422 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 423 *    or free these blocks i.e. while marking those blocks as used/free in
 424 *    mb_mark_used()/mb_free_blocks().
 425 * 3. We also increment this percpu seq counter when we successfully identify
 426 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 427 *    of those PAs inside ext4_mb_discard_group_preallocations().
 428 *
 429 * Now to make sure that the regular fast path of block allocation is not
 430 * affected, as a small optimization we only sample the percpu seq counter
 431 * on that cpu. Only when the block allocation fails and no freed blocks
 432 * were found do we sample the percpu seq counter for all cpus using the
 433 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 434 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 435 */
 436static DEFINE_PER_CPU(u64, discard_pa_seq);
 437static inline u64 ext4_get_discard_pa_seq_sum(void)
 438{
 439	int __cpu;
 440	u64 __seq = 0;
 441
 442	for_each_possible_cpu(__cpu)
 443		__seq += per_cpu(discard_pa_seq, __cpu);
 444	return __seq;
 445}
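
/*
 * A minimal usage sketch (not the exact ext4_mb_new_blocks() code): the
 * caller samples the sequence before attempting allocation and, on failure
 * with no freed blocks found, compares the global sum to decide whether a
 * concurrent discard/free makes a retry worthwhile.
 *
 *	u64 seq = this_cpu_read(discard_pa_seq);
 *	... attempt allocation ...
 *	if (failed && ext4_get_discard_pa_seq_sum() != seq)
 *		... retry ...
 */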
 446
 447static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 448{
 449#if BITS_PER_LONG == 64
 450	*bit += ((unsigned long) addr & 7UL) << 3;
 451	addr = (void *) ((unsigned long) addr & ~7UL);
 452#elif BITS_PER_LONG == 32
 453	*bit += ((unsigned long) addr & 3UL) << 3;
 454	addr = (void *) ((unsigned long) addr & ~3UL);
 455#else
 456#error "how many bits you are?!"
 457#endif
 458	return addr;
 459}
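
/*
 * Illustrative example (assuming BITS_PER_LONG == 64): for an addr ending
 * in ...0x13 and bit = 5, the low 3 address bits (0x3) are folded into the
 * bit index, giving bit = 5 + 3 * 8 = 29 and an addr rounded down to
 * ...0x10, so the bit operation lands on a long-aligned word.
 */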
 460
 461static inline int mb_test_bit(int bit, void *addr)
 462{
 463	/*
  464	 * ext4_test_bit on architectures like powerpc
  465	 * needs an unsigned long aligned address
 466	 */
 467	addr = mb_correct_addr_and_bit(&bit, addr);
 468	return ext4_test_bit(bit, addr);
 469}
 470
 471static inline void mb_set_bit(int bit, void *addr)
 472{
 473	addr = mb_correct_addr_and_bit(&bit, addr);
 474	ext4_set_bit(bit, addr);
 475}
 476
 477static inline void mb_clear_bit(int bit, void *addr)
 478{
 479	addr = mb_correct_addr_and_bit(&bit, addr);
 480	ext4_clear_bit(bit, addr);
 481}
 482
 483static inline int mb_test_and_clear_bit(int bit, void *addr)
 484{
 485	addr = mb_correct_addr_and_bit(&bit, addr);
 486	return ext4_test_and_clear_bit(bit, addr);
 487}
 488
 489static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 490{
 491	int fix = 0, ret, tmpmax;
 492	addr = mb_correct_addr_and_bit(&fix, addr);
 493	tmpmax = max + fix;
 494	start += fix;
 495
 496	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
 497	if (ret > max)
 498		return max;
 499	return ret;
 500}
 501
 502static inline int mb_find_next_bit(void *addr, int max, int start)
 503{
 504	int fix = 0, ret, tmpmax;
 505	addr = mb_correct_addr_and_bit(&fix, addr);
 506	tmpmax = max + fix;
 507	start += fix;
 508
 509	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
 510	if (ret > max)
 511		return max;
 512	return ret;
 513}
 514
 515static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
 516{
 517	char *bb;
 518
 519	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
 520	BUG_ON(max == NULL);
 521
 522	if (order > e4b->bd_blkbits + 1) {
 523		*max = 0;
 524		return NULL;
 525	}
 526
 527	/* at order 0 we see each particular block */
 528	if (order == 0) {
 529		*max = 1 << (e4b->bd_blkbits + 3);
 530		return e4b->bd_bitmap;
 531	}
 532
 533	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
 534	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
 535
 536	return bb;
 537}
 538
 539#ifdef DOUBLE_CHECK
 540static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 541			   int first, int count)
 542{
 543	int i;
 544	struct super_block *sb = e4b->bd_sb;
 545
 546	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 547		return;
 548	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
 549	for (i = 0; i < count; i++) {
 550		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 551			ext4_fsblk_t blocknr;
 552
 553			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 554			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
 555			ext4_grp_locked_error(sb, e4b->bd_group,
 556					      inode ? inode->i_ino : 0,
 557					      blocknr,
 558					      "freeing block already freed "
 559					      "(bit %u)",
 560					      first + i);
 561			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
 562					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
 563		}
 564		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
 565	}
 566}
 567
 568static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
 569{
 570	int i;
 571
 572	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 573		return;
 574	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
 575	for (i = 0; i < count; i++) {
 576		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
 577		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
 578	}
 579}
 580
 581static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 582{
 583	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
 584		return;
 585	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
 586		unsigned char *b1, *b2;
 587		int i;
 588		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
 589		b2 = (unsigned char *) bitmap;
 590		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 591			if (b1[i] != b2[i]) {
 592				ext4_msg(e4b->bd_sb, KERN_ERR,
 593					 "corruption in group %u "
 594					 "at byte %u(%u): %x in copy != %x "
 595					 "on disk/prealloc",
 596					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
 597				BUG();
 598			}
 599		}
 600	}
 601}
 602
 603static void mb_group_bb_bitmap_alloc(struct super_block *sb,
 604			struct ext4_group_info *grp, ext4_group_t group)
 605{
 606	struct buffer_head *bh;
 607
 608	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
 609	if (!grp->bb_bitmap)
 610		return;
 611
 612	bh = ext4_read_block_bitmap(sb, group);
 613	if (IS_ERR_OR_NULL(bh)) {
 614		kfree(grp->bb_bitmap);
 615		grp->bb_bitmap = NULL;
 616		return;
 617	}
 618
 619	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
 620	put_bh(bh);
 621}
 622
 623static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 624{
 625	kfree(grp->bb_bitmap);
 626}
 627
 628#else
 629static inline void mb_free_blocks_double(struct inode *inode,
 630				struct ext4_buddy *e4b, int first, int count)
 631{
 632	return;
 633}
 634static inline void mb_mark_used_double(struct ext4_buddy *e4b,
 635						int first, int count)
 636{
 637	return;
 638}
 639static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 640{
 641	return;
 642}
 643
 644static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
 645			struct ext4_group_info *grp, ext4_group_t group)
 646{
 647	return;
 648}
 649
 650static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
 651{
 652	return;
 653}
 654#endif
 655
 656#ifdef AGGRESSIVE_CHECK
 657
 658#define MB_CHECK_ASSERT(assert)						\
 659do {									\
 660	if (!(assert)) {						\
 661		printk(KERN_EMERG					\
 662			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
 663			function, file, line, # assert);		\
 664		BUG();							\
 665	}								\
 666} while (0)
 667
 668static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 669				const char *function, int line)
 670{
 671	struct super_block *sb = e4b->bd_sb;
 672	int order = e4b->bd_blkbits + 1;
 673	int max;
 674	int max2;
 675	int i;
 676	int j;
 677	int k;
 678	int count;
 679	struct ext4_group_info *grp;
 680	int fragments = 0;
 681	int fstart;
 682	struct list_head *cur;
 683	void *buddy;
 684	void *buddy2;
 685
 686	if (e4b->bd_info->bb_check_counter++ % 10)
 687		return 0;
 688
 689	while (order > 1) {
 690		buddy = mb_find_buddy(e4b, order, &max);
 691		MB_CHECK_ASSERT(buddy);
 692		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
 693		MB_CHECK_ASSERT(buddy2);
 694		MB_CHECK_ASSERT(buddy != buddy2);
 695		MB_CHECK_ASSERT(max * 2 == max2);
 696
 697		count = 0;
 698		for (i = 0; i < max; i++) {
 699
 700			if (mb_test_bit(i, buddy)) {
 701				/* only single bit in buddy2 may be 0 */
 702				if (!mb_test_bit(i << 1, buddy2)) {
 703					MB_CHECK_ASSERT(
 704						mb_test_bit((i<<1)+1, buddy2));
 705				}
 706				continue;
 707			}
 708
 709			/* both bits in buddy2 must be 1 */
 710			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
 711			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
 712
 713			for (j = 0; j < (1 << order); j++) {
 714				k = (i * (1 << order)) + j;
 715				MB_CHECK_ASSERT(
 716					!mb_test_bit(k, e4b->bd_bitmap));
 717			}
 718			count++;
 719		}
 720		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
 721		order--;
 722	}
 723
 724	fstart = -1;
 725	buddy = mb_find_buddy(e4b, 0, &max);
 726	for (i = 0; i < max; i++) {
 727		if (!mb_test_bit(i, buddy)) {
 728			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
 729			if (fstart == -1) {
 730				fragments++;
 731				fstart = i;
 732			}
 733			continue;
 734		}
 735		fstart = -1;
 736		/* check used bits only */
 737		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
 738			buddy2 = mb_find_buddy(e4b, j, &max2);
 739			k = i >> j;
 740			MB_CHECK_ASSERT(k < max2);
 741			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
 742		}
 743	}
 744	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
 745	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 746
 747	grp = ext4_get_group_info(sb, e4b->bd_group);
 748	list_for_each(cur, &grp->bb_prealloc_list) {
 749		ext4_group_t groupnr;
 750		struct ext4_prealloc_space *pa;
 751		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
 752		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
 753		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
 754		for (i = 0; i < pa->pa_len; i++)
 755			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
 756	}
 757	return 0;
 758}
 759#undef MB_CHECK_ASSERT
 760#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
 761					__FILE__, __func__, __LINE__)
 762#else
 763#define mb_check_buddy(e4b)
 764#endif
 765
 766/*
 767 * Divide blocks started from @first with length @len into
 768 * smaller chunks with power of 2 blocks.
 769 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 770 * then increase bb_counters[] for corresponded chunk size.
 771 */
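/*
 * A worked example (values are illustrative): for first = 6 and len = 6 the
 * loop first emits an order-1 chunk at cluster 6 (ffs(6) - 1 = 1 limits the
 * alignment), then an order-2 chunk at cluster 8, updating bb_counters[1]
 * and bb_counters[2] and clearing the matching buddy bits.
 */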
 772static void ext4_mb_mark_free_simple(struct super_block *sb,
 773				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
 774					struct ext4_group_info *grp)
 775{
 776	struct ext4_sb_info *sbi = EXT4_SB(sb);
 777	ext4_grpblk_t min;
 778	ext4_grpblk_t max;
 779	ext4_grpblk_t chunk;
 780	unsigned int border;
 781
 782	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
 783
 784	border = 2 << sb->s_blocksize_bits;
 785
 786	while (len > 0) {
 787		/* find how many blocks can be covered since this position */
 788		max = ffs(first | border) - 1;
 789
 790		/* find how many blocks of power 2 we need to mark */
 791		min = fls(len) - 1;
 792
 793		if (max < min)
 794			min = max;
 795		chunk = 1 << min;
 796
 797		/* mark multiblock chunks only */
 798		grp->bb_counters[min]++;
 799		if (min > 0)
 800			mb_clear_bit(first >> min,
 801				     buddy + sbi->s_mb_offsets[min]);
 802
 803		len -= chunk;
 804		first += chunk;
 805	}
 806}
 807
 808static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
 809{
 810	int order;
 811
 812	/*
  813	 * We don't bother with special lists for groups with only 1-block free
  814	 * extents or for completely empty groups.
 815	 */
 816	order = fls(len) - 2;
 817	if (order < 0)
 818		return 0;
 819	if (order == MB_NUM_ORDERS(sb))
 820		order--;
 821	return order;
 822}
 823
 824/* Move group to appropriate avg_fragment_size list */
 825static void
 826mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 827{
 828	struct ext4_sb_info *sbi = EXT4_SB(sb);
 829	int new_order;
 830
 831	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
 832		return;
 833
 834	new_order = mb_avg_fragment_size_order(sb,
 835					grp->bb_free / grp->bb_fragments);
 836	if (new_order == grp->bb_avg_fragment_size_order)
 837		return;
 838
 839	if (grp->bb_avg_fragment_size_order != -1) {
 840		write_lock(&sbi->s_mb_avg_fragment_size_locks[
 841					grp->bb_avg_fragment_size_order]);
 842		list_del(&grp->bb_avg_fragment_size_node);
 843		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 844					grp->bb_avg_fragment_size_order]);
 845	}
 846	grp->bb_avg_fragment_size_order = new_order;
 847	write_lock(&sbi->s_mb_avg_fragment_size_locks[
 848					grp->bb_avg_fragment_size_order]);
 849	list_add_tail(&grp->bb_avg_fragment_size_node,
 850		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
 851	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
 852					grp->bb_avg_fragment_size_order]);
 853}
 854
 855/*
 856 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 857 * cr level needs an update.
 858 */
 859static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
 860			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
 861{
 862	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 863	struct ext4_group_info *iter, *grp;
 864	int i;
 865
 866	if (ac->ac_status == AC_STATUS_FOUND)
 867		return;
 868
 869	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
 870		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
 871
 872	grp = NULL;
 873	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 874		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
 875			continue;
 876		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
 877		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
 878			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 879			continue;
 880		}
 881		grp = NULL;
 882		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
 883				    bb_largest_free_order_node) {
 884			if (sbi->s_mb_stats)
 885				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
 886			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
 887				grp = iter;
 888				break;
 889			}
 890		}
 891		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
 892		if (grp)
 893			break;
 894	}
 895
 896	if (!grp) {
 897		/* Increment cr and search again */
 898		*new_cr = 1;
 899	} else {
 900		*group = grp->bb_group;
 901		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
 902	}
 903}
 904
 905/*
 906 * Choose next group by traversing average fragment size list of suitable
 907 * order. Updates *new_cr if cr level needs an update.
 908 */
 909static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
 910		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
 911{
 912	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 913	struct ext4_group_info *grp = NULL, *iter;
 914	int i;
 915
 916	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
 917		if (sbi->s_mb_stats)
 918			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
 919	}
 920
 921	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
 922	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
 923		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
 924			continue;
 925		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
 926		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
 927			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
 928			continue;
 929		}
 930		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
 931				    bb_avg_fragment_size_node) {
 932			if (sbi->s_mb_stats)
 933				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
 934			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
 935				grp = iter;
 936				break;
 937			}
 938		}
 939		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
 940		if (grp)
 941			break;
 942	}
 943
 944	if (grp) {
 945		*group = grp->bb_group;
 946		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
 947	} else {
 948		*new_cr = 2;
 949	}
 950}
 951
 952static inline int should_optimize_scan(struct ext4_allocation_context *ac)
 953{
 954	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
 955		return 0;
 956	if (ac->ac_criteria >= 2)
 957		return 0;
 958	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
 959		return 0;
 960	return 1;
 961}
 962
 963/*
 964 * Return next linear group for allocation. If linear traversal should not be
 965 * performed, this function just returns the same group
 966 */
 967static int
 968next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
 969{
 970	if (!should_optimize_scan(ac))
 971		goto inc_and_return;
 972
 973	if (ac->ac_groups_linear_remaining) {
 974		ac->ac_groups_linear_remaining--;
 975		goto inc_and_return;
 976	}
 977
 978	return group;
 979inc_and_return:
 980	/*
 981	 * Artificially restricted ngroups for non-extent
 982	 * files makes group > ngroups possible on first loop.
 983	 */
 984	return group + 1 >= ngroups ? 0 : group + 1;
 985}
 986
 987/*
 988 * ext4_mb_choose_next_group: choose next group for allocation.
 989 *
 990 * @ac        Allocation Context
 991 * @new_cr    This is an output parameter. If there is no good group
 992 *            available at current CR level, this field is updated to indicate
 993 *            the new cr level that should be used.
 994 * @group     This is an input / output parameter. As an input it indicates the
 995 *            next group that the allocator intends to use for allocation. As
 996 *            output, this field indicates the next group that should be used as
 997 *            determined by the optimization functions.
 998 * @ngroups   Total number of groups
 999 */
1000static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1001		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1002{
1003	*new_cr = ac->ac_criteria;
1004
1005	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1006		*group = next_linear_group(ac, *group, ngroups);
1007		return;
1008	}
1009
1010	if (*new_cr == 0) {
1011		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1012	} else if (*new_cr == 1) {
1013		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1014	} else {
1015		/*
1016		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1017		 * bb_free. But until that happens, we should never come here.
1018		 */
1019		WARN_ON(1);
1020	}
1021}
1022
1023/*
1024 * Cache the order of the largest free extent we have available in this block
1025 * group.
1026 */
1027static void
1028mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1029{
1030	struct ext4_sb_info *sbi = EXT4_SB(sb);
1031	int i;
1032
1033	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1034		if (grp->bb_counters[i] > 0)
 
 
1035			break;
1036	/* No need to move between order lists? */
1037	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1038	    i == grp->bb_largest_free_order) {
1039		grp->bb_largest_free_order = i;
1040		return;
1041	}
1042
1043	if (grp->bb_largest_free_order >= 0) {
1044		write_lock(&sbi->s_mb_largest_free_orders_locks[
1045					      grp->bb_largest_free_order]);
1046		list_del_init(&grp->bb_largest_free_order_node);
1047		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1048					      grp->bb_largest_free_order]);
1049	}
1050	grp->bb_largest_free_order = i;
1051	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1052		write_lock(&sbi->s_mb_largest_free_orders_locks[
1053					      grp->bb_largest_free_order]);
1054		list_add_tail(&grp->bb_largest_free_order_node,
1055		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1056		write_unlock(&sbi->s_mb_largest_free_orders_locks[
1057					      grp->bb_largest_free_order]);
1058	}
1059}
1060
1061static noinline_for_stack
1062void ext4_mb_generate_buddy(struct super_block *sb,
1063				void *buddy, void *bitmap, ext4_group_t group)
1064{
1065	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1066	struct ext4_sb_info *sbi = EXT4_SB(sb);
1067	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1068	ext4_grpblk_t i = 0;
1069	ext4_grpblk_t first;
1070	ext4_grpblk_t len;
1071	unsigned free = 0;
1072	unsigned fragments = 0;
1073	unsigned long long period = get_cycles();
1074
1075	/* initialize buddy from bitmap which is aggregation
1076	 * of on-disk bitmap and preallocations */
1077	i = mb_find_next_zero_bit(bitmap, max, 0);
1078	grp->bb_first_free = i;
1079	while (i < max) {
1080		fragments++;
1081		first = i;
1082		i = mb_find_next_bit(bitmap, max, i);
1083		len = i - first;
1084		free += len;
1085		if (len > 1)
1086			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1087		else
1088			grp->bb_counters[0]++;
1089		if (i < max)
1090			i = mb_find_next_zero_bit(bitmap, max, i);
1091	}
1092	grp->bb_fragments = fragments;
1093
1094	if (free != grp->bb_free) {
1095		ext4_grp_locked_error(sb, group, 0, 0,
1096				      "block bitmap and bg descriptor "
1097				      "inconsistent: %u vs %u free clusters",
1098				      free, grp->bb_free);
1099		/*
1100		 * If we intend to continue, we consider group descriptor
1101		 * corrupt and update bb_free using bitmap value
1102		 */
1103		grp->bb_free = free;
1104		ext4_mark_group_bitmap_corrupted(sb, group,
1105					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1106	}
1107	mb_set_largest_free_order(sb, grp);
1108	mb_update_avg_fragment_size(sb, grp);
1109
1110	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1111
1112	period = get_cycles() - period;
1113	atomic_inc(&sbi->s_mb_buddies_generated);
1114	atomic64_add(period, &sbi->s_mb_generation_time);
 
 
1115}
1116
1117/* The buddy information is attached to the buddy cache inode
1118 * for convenience. The information regarding each group
1119 * is loaded via ext4_mb_load_buddy. This information comprises
1120 * the block bitmap and buddy information, and is
1121 * stored in the inode as
1122 *
1123 * {                        page                        }
1124 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1125 *
1126 *
1127 * one block each for bitmap and buddy information.
1128 * So for each group we take up 2 blocks. A page can
1129 * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
1130 * So it can hold information for groups_per_page groups, which
1131 * is blocks_per_page/2
1132 *
1133 * Locking note:  This routine takes the block group lock of all groups
1134 * for this page; do not hold this lock when calling this routine!
1135 */
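/*
 * Illustrative index math (assuming a 1k blocksize and 4k pages, so
 * blocks_per_page = 4): group 5 uses buddy-cache blocks 10 (bitmap) and
 * 11 (buddy), which land in page 2 at block offsets 2 and 3.
 */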
1136
1137static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1138{
1139	ext4_group_t ngroups;
1140	int blocksize;
1141	int blocks_per_page;
1142	int groups_per_page;
1143	int err = 0;
1144	int i;
1145	ext4_group_t first_group, group;
1146	int first_block;
1147	struct super_block *sb;
1148	struct buffer_head *bhs;
1149	struct buffer_head **bh = NULL;
1150	struct inode *inode;
1151	char *data;
1152	char *bitmap;
1153	struct ext4_group_info *grinfo;
1154
 
 
1155	inode = page->mapping->host;
1156	sb = inode->i_sb;
1157	ngroups = ext4_get_groups_count(sb);
1158	blocksize = i_blocksize(inode);
1159	blocks_per_page = PAGE_SIZE / blocksize;
1160
1161	mb_debug(sb, "init page %lu\n", page->index);
1162
1163	groups_per_page = blocks_per_page >> 1;
1164	if (groups_per_page == 0)
1165		groups_per_page = 1;
1166
1167	/* allocate buffer_heads to read bitmaps */
1168	if (groups_per_page > 1) {
 
1169		i = sizeof(struct buffer_head *) * groups_per_page;
1170		bh = kzalloc(i, gfp);
1171		if (bh == NULL) {
1172			err = -ENOMEM;
1173			goto out;
1174		}
1175	} else
1176		bh = &bhs;
1177
1178	first_group = page->index * blocks_per_page / 2;
1179
1180	/* read all groups the page covers into the cache */
1181	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1182		if (group >= ngroups)
 
 
1183			break;
1184
1185		grinfo = ext4_get_group_info(sb, group);
1186		/*
1187		 * If page is uptodate then we came here after online resize
1188		 * which added some new uninitialized group info structs, so
1189		 * we must skip all initialized uptodate buddies on the page,
1190		 * which may be currently in use by an allocating task.
1191		 */
1192		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1193			bh[i] = NULL;
1194			continue;
1195		}
1196		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1197		if (IS_ERR(bh[i])) {
1198			err = PTR_ERR(bh[i]);
1199			bh[i] = NULL;
1200			goto out;
1201		}
1202		mb_debug(sb, "read bitmap for group %u\n", group);
1203	}
1204
1205	/* wait for I/O completion */
1206	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1207		int err2;
 
1208
1209		if (!bh[i])
1210			continue;
1211		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1212		if (!err)
1213			err = err2;
1214	}
1215
 
1216	first_block = page->index * blocks_per_page;
1217	for (i = 0; i < blocks_per_page; i++) {
 
 
1218		group = (first_block + i) >> 1;
1219		if (group >= ngroups)
1220			break;
1221
1222		if (!bh[group - first_group])
1223			/* skip initialized uptodate buddy */
1224			continue;
1225
1226		if (!buffer_verified(bh[group - first_group]))
1227			/* Skip faulty bitmaps */
1228			continue;
1229		err = 0;
1230
1231		/*
1232		 * data carries information regarding this
1233		 * particular group in the format specified
1234		 * above
1235		 *
1236		 */
1237		data = page_address(page) + (i * blocksize);
1238		bitmap = bh[group - first_group]->b_data;
1239
1240		/*
1241		 * We place the buddy block and bitmap block
1242		 * close together
1243		 */
1244		if ((first_block + i) & 1) {
1245			/* this is block of buddy */
1246			BUG_ON(incore == NULL);
1247			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1248				group, page->index, i * blocksize);
1249			trace_ext4_mb_buddy_bitmap_load(sb, group);
1250			grinfo = ext4_get_group_info(sb, group);
1251			grinfo->bb_fragments = 0;
1252			memset(grinfo->bb_counters, 0,
1253			       sizeof(*grinfo->bb_counters) *
1254			       (MB_NUM_ORDERS(sb)));
1255			/*
1256			 * incore got set to the group block bitmap below
1257			 */
1258			ext4_lock_group(sb, group);
1259			/* init the buddy */
1260			memset(data, 0xff, blocksize);
1261			ext4_mb_generate_buddy(sb, data, incore, group);
1262			ext4_unlock_group(sb, group);
1263			incore = NULL;
1264		} else {
1265			/* this is block of bitmap */
1266			BUG_ON(incore != NULL);
1267			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1268				group, page->index, i * blocksize);
1269			trace_ext4_mb_bitmap_load(sb, group);
1270
1271			/* see comments in ext4_mb_put_pa() */
1272			ext4_lock_group(sb, group);
1273			memcpy(data, bitmap, blocksize);
1274
1275			/* mark all preallocated blks used in in-core bitmap */
1276			ext4_mb_generate_from_pa(sb, data, group);
1277			ext4_mb_generate_from_freelist(sb, data, group);
1278			ext4_unlock_group(sb, group);
1279
1280			/* set incore so that the buddy information can be
1281			 * generated using this
1282			 */
1283			incore = data;
1284		}
1285	}
1286	SetPageUptodate(page);
1287
1288out:
1289	if (bh) {
1290		for (i = 0; i < groups_per_page; i++)
1291			brelse(bh[i]);
1292		if (bh != &bhs)
1293			kfree(bh);
1294	}
1295	return err;
1296}
1297
1298/*
1299 * Lock the buddy and bitmap pages. This makes sure a parallel init_group
1300 * on the same buddy page doesn't happen while holding the buddy page lock.
1301 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1302 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1303 */
1304static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1305		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1306{
1307	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1308	int block, pnum, poff;
1309	int blocks_per_page;
1310	struct page *page;
1311
1312	e4b->bd_buddy_page = NULL;
1313	e4b->bd_bitmap_page = NULL;
1314
1315	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1316	/*
1317	 * the buddy cache inode stores the block bitmap
1318	 * and buddy information in consecutive blocks.
1319	 * So for each group we need two blocks.
1320	 */
1321	block = group * 2;
1322	pnum = block / blocks_per_page;
1323	poff = block % blocks_per_page;
1324	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1325	if (!page)
1326		return -ENOMEM;
1327	BUG_ON(page->mapping != inode->i_mapping);
1328	e4b->bd_bitmap_page = page;
1329	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1330
1331	if (blocks_per_page >= 2) {
1332		/* buddy and bitmap are on the same page */
1333		return 0;
1334	}
1335
1336	block++;
1337	pnum = block / blocks_per_page;
1338	page = find_or_create_page(inode->i_mapping, pnum, gfp);
 
1339	if (!page)
1340		return -ENOMEM;
1341	BUG_ON(page->mapping != inode->i_mapping);
1342	e4b->bd_buddy_page = page;
1343	return 0;
1344}
1345
1346static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1347{
1348	if (e4b->bd_bitmap_page) {
1349		unlock_page(e4b->bd_bitmap_page);
1350		put_page(e4b->bd_bitmap_page);
1351	}
1352	if (e4b->bd_buddy_page) {
1353		unlock_page(e4b->bd_buddy_page);
1354		put_page(e4b->bd_buddy_page);
1355	}
1356}
1357
1358/*
1359 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1360 * block group lock of all groups for this page; do not hold the BG lock when
1361 * calling this routine!
1362 */
1363static noinline_for_stack
1364int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1365{
1366
1367	struct ext4_group_info *this_grp;
1368	struct ext4_buddy e4b;
1369	struct page *page;
1370	int ret = 0;
1371
1372	might_sleep();
1373	mb_debug(sb, "init group %u\n", group);
1374	this_grp = ext4_get_group_info(sb, group);
1375	/*
1376	 * This ensures that we don't reinit the buddy cache
1377	 * page which maps to the group from which we are already
1378	 * allocating. If we are looking at the buddy cache we would
1379	 * have taken a reference using ext4_mb_load_buddy and that
1380	 * would have pinned buddy page to page cache.
1381	 * The call to ext4_mb_get_buddy_page_lock will mark the
1382	 * page accessed.
1383	 */
1384	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1385	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1386		/*
1387		 * somebody initialized the group
1388		 * return without doing anything
1389		 */
1390		goto err;
1391	}
1392
1393	page = e4b.bd_bitmap_page;
1394	ret = ext4_mb_init_cache(page, NULL, gfp);
1395	if (ret)
1396		goto err;
1397	if (!PageUptodate(page)) {
1398		ret = -EIO;
1399		goto err;
1400	}
 
1401
1402	if (e4b.bd_buddy_page == NULL) {
1403		/*
1404		 * If both the bitmap and buddy are in
1405		 * the same page we don't need to force
1406		 * init the buddy
1407		 */
1408		ret = 0;
1409		goto err;
1410	}
1411	/* init buddy cache */
1412	page = e4b.bd_buddy_page;
1413	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1414	if (ret)
1415		goto err;
1416	if (!PageUptodate(page)) {
1417		ret = -EIO;
1418		goto err;
1419	}
 
1420err:
1421	ext4_mb_put_buddy_page_lock(&e4b);
1422	return ret;
1423}
1424
1425/*
1426 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1427 * block group lock of all groups for this page; do not hold the BG lock when
1428 * calling this routine!
1429 */
1430static noinline_for_stack int
1431ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1432		       struct ext4_buddy *e4b, gfp_t gfp)
1433{
1434	int blocks_per_page;
1435	int block;
1436	int pnum;
1437	int poff;
1438	struct page *page;
1439	int ret;
1440	struct ext4_group_info *grp;
1441	struct ext4_sb_info *sbi = EXT4_SB(sb);
1442	struct inode *inode = sbi->s_buddy_cache;
1443
1444	might_sleep();
1445	mb_debug(sb, "load group %u\n", group);
1446
1447	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1448	grp = ext4_get_group_info(sb, group);
1449
1450	e4b->bd_blkbits = sb->s_blocksize_bits;
1451	e4b->bd_info = grp;
1452	e4b->bd_sb = sb;
1453	e4b->bd_group = group;
1454	e4b->bd_buddy_page = NULL;
1455	e4b->bd_bitmap_page = NULL;
1456
1457	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1458		/*
1459		 * we need full data about the group
1460		 * to make a good selection
1461		 */
1462		ret = ext4_mb_init_group(sb, group, gfp);
1463		if (ret)
1464			return ret;
1465	}
1466
1467	/*
1468	 * the buddy cache inode stores the block bitmap
1469	 * and buddy information in consecutive blocks.
1470	 * So for each group we need two blocks.
1471	 */
1472	block = group * 2;
1473	pnum = block / blocks_per_page;
1474	poff = block % blocks_per_page;
1475
1476	/* we could use find_or_create_page(), but it locks the page,
1477	 * which we'd like to avoid in the fast path ... */
1478	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1479	if (page == NULL || !PageUptodate(page)) {
1480		if (page)
1481			/*
1482			 * drop the page reference and try
1483			 * to get the page with lock. If we
1484			 * are not uptodate that implies
1485			 * somebody just created the page but
1486			 * has yet to initialize it. So
1487			 * wait for it to initialize.
1488			 */
1489			put_page(page);
1490		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1491		if (page) {
1492			BUG_ON(page->mapping != inode->i_mapping);
1493			if (!PageUptodate(page)) {
1494				ret = ext4_mb_init_cache(page, NULL, gfp);
1495				if (ret) {
1496					unlock_page(page);
1497					goto err;
1498				}
1499				mb_cmp_bitmaps(e4b, page_address(page) +
1500					       (poff * sb->s_blocksize));
1501			}
1502			unlock_page(page);
1503		}
1504	}
1505	if (page == NULL) {
1506		ret = -ENOMEM;
1507		goto err;
1508	}
1509	if (!PageUptodate(page)) {
1510		ret = -EIO;
1511		goto err;
1512	}
1513
1514	/* Pages marked accessed already */
1515	e4b->bd_bitmap_page = page;
1516	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
 
1517
1518	block++;
1519	pnum = block / blocks_per_page;
1520	poff = block % blocks_per_page;
1521
1522	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1523	if (page == NULL || !PageUptodate(page)) {
1524		if (page)
1525			put_page(page);
1526		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1527		if (page) {
1528			BUG_ON(page->mapping != inode->i_mapping);
1529			if (!PageUptodate(page)) {
1530				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1531							 gfp);
1532				if (ret) {
1533					unlock_page(page);
1534					goto err;
1535				}
1536			}
1537			unlock_page(page);
1538		}
1539	}
1540	if (page == NULL) {
1541		ret = -ENOMEM;
1542		goto err;
1543	}
1544	if (!PageUptodate(page)) {
1545		ret = -EIO;
1546		goto err;
1547	}
1548
1549	/* Pages marked accessed already */
1550	e4b->bd_buddy_page = page;
1551	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1552
1553	return 0;
1554
1555err:
1556	if (page)
1557		put_page(page);
1558	if (e4b->bd_bitmap_page)
1559		put_page(e4b->bd_bitmap_page);
1560	if (e4b->bd_buddy_page)
1561		put_page(e4b->bd_buddy_page);
1562	e4b->bd_buddy = NULL;
1563	e4b->bd_bitmap = NULL;
1564	return ret;
1565}
1566
1567static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1568			      struct ext4_buddy *e4b)
1569{
1570	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1571}
1572
1573static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1574{
1575	if (e4b->bd_bitmap_page)
1576		put_page(e4b->bd_bitmap_page);
1577	if (e4b->bd_buddy_page)
1578		put_page(e4b->bd_buddy_page);
1579}
1580
1581
1582static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1583{
1584	int order = 1, max;
1585	void *bb;
1586
1587	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1588	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1589
 
1590	while (order <= e4b->bd_blkbits + 1) {
1591		bb = mb_find_buddy(e4b, order, &max);
1592		if (!mb_test_bit(block >> order, bb)) {
1593			/* this block is part of buddy of order 'order' */
1594			return order;
1595		}
 
1596		order++;
1597	}
1598	return 0;
1599}
1600
1601static void mb_clear_bits(void *bm, int cur, int len)
1602{
1603	__u32 *addr;
1604
1605	len = cur + len;
1606	while (cur < len) {
1607		if ((cur & 31) == 0 && (len - cur) >= 32) {
1608			/* fast path: clear whole word at once */
1609			addr = bm + (cur >> 3);
1610			*addr = 0;
1611			cur += 32;
1612			continue;
1613		}
1614		mb_clear_bit(cur, bm);
1615		cur++;
1616	}
1617}
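
/*
 * For example (illustrative only): mb_clear_bits(bm, 30, 40) clears bits
 * 30 and 31 one at a time, clears bits 32..63 with a single 32-bit store,
 * and then clears bits 64..69 individually again.
 */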
1618
1619/* clear bits in given range
1620 * will return first found zero bit if any, -1 otherwise
1621 */
1622static int mb_test_and_clear_bits(void *bm, int cur, int len)
1623{
1624	__u32 *addr;
1625	int zero_bit = -1;
1626
1627	len = cur + len;
1628	while (cur < len) {
1629		if ((cur & 31) == 0 && (len - cur) >= 32) {
1630			/* fast path: clear whole word at once */
1631			addr = bm + (cur >> 3);
1632			if (*addr != (__u32)(-1) && zero_bit == -1)
1633				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1634			*addr = 0;
1635			cur += 32;
1636			continue;
1637		}
1638		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1639			zero_bit = cur;
1640		cur++;
1641	}
1642
1643	return zero_bit;
1644}
1645
1646void mb_set_bits(void *bm, int cur, int len)
1647{
1648	__u32 *addr;
1649
1650	len = cur + len;
1651	while (cur < len) {
1652		if ((cur & 31) == 0 && (len - cur) >= 32) {
1653			/* fast path: set whole word at once */
1654			addr = bm + (cur >> 3);
1655			*addr = 0xffffffff;
1656			cur += 32;
1657			continue;
1658		}
1659		mb_set_bit(cur, bm);
1660		cur++;
1661	}
1662}
1663
1664static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1665{
1666	if (mb_test_bit(*bit + side, bitmap)) {
1667		mb_clear_bit(*bit, bitmap);
1668		(*bit) -= side;
1669		return 1;
1670	}
1671	else {
1672		(*bit) += side;
1673		mb_set_bit(*bit, bitmap);
1674		return -1;
1675	}
1676}
1677
1678static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1679{
1680	int max;
1681	int order = 1;
1682	void *buddy = mb_find_buddy(e4b, order, &max);
1683
1684	while (buddy) {
1685		void *buddy2;
1686
1687		/* Bits in range [first; last] are known to be set since
1688		 * corresponding blocks were allocated. Bits in range
1689		 * (first; last) will stay set because they form buddies on
1690		 * upper layer. We just deal with borders if they don't
1691		 * align with upper layer and then go up.
1692		 * Releasing entire group is all about clearing
1693		 * single bit of highest order buddy.
1694		 */
1695
1696		/* Example:
1697		 * ---------------------------------
1698		 * |   1   |   1   |   1   |   1   |
1699		 * ---------------------------------
1700		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1701		 * ---------------------------------
1702		 *   0   1   2   3   4   5   6   7
1703		 *      \_____________________/
1704		 *
1705		 * Neither [1] nor [6] is aligned to above layer.
1706		 * Left neighbour [0] is free, so mark it busy,
1707		 * decrease bb_counters and extend range to
1708		 * [0; 6]
1709		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1710		 * mark [6] free, increase bb_counters and shrink range to
1711		 * [0; 5].
1712		 * Then shift range to [0; 2], go up and do the same.
1713		 */
1714
1715
1716		if (first & 1)
1717			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1718		if (!(last & 1))
1719			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1720		if (first > last)
1721			break;
1722		order++;
1723
1724		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1725			mb_clear_bits(buddy, first, last - first + 1);
1726			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1727			break;
1728		}
1729		first >>= 1;
1730		last >>= 1;
1731		buddy = buddy2;
1732	}
1733}
1734
1735static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1736			   int first, int count)
1737{
1738	int left_is_free = 0;
1739	int right_is_free = 0;
1740	int block;
1741	int last = first + count - 1;
 
1742	struct super_block *sb = e4b->bd_sb;
1743
1744	if (WARN_ON(count == 0))
1745		return;
1746	BUG_ON(last >= (sb->s_blocksize << 3));
1747	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1748	/* Don't bother if the block group is corrupt. */
1749	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1750		return;
1751
1752	mb_check_buddy(e4b);
1753	mb_free_blocks_double(inode, e4b, first, count);
1754
1755	this_cpu_inc(discard_pa_seq);
1756	e4b->bd_info->bb_free += count;
1757	if (first < e4b->bd_info->bb_first_free)
1758		e4b->bd_info->bb_first_free = first;
1759
1760	/* access memory sequentially: check left neighbour,
1761	 * clear range and then check right neighbour
1762	 */
1763	if (first != 0)
1764		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1765	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1766	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1767		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1768
1769	if (unlikely(block != -1)) {
1770		struct ext4_sb_info *sbi = EXT4_SB(sb);
1771		ext4_fsblk_t blocknr;
1772
1773		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1774		blocknr += EXT4_C2B(sbi, block);
1775		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1776			ext4_grp_locked_error(sb, e4b->bd_group,
1777					      inode ? inode->i_ino : 0,
1778					      blocknr,
1779					      "freeing already freed block (bit %u); block bitmap corrupt.",
1780					      block);
1781			ext4_mark_group_bitmap_corrupted(
1782				sb, e4b->bd_group,
1783				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1784		}
1785		goto done;
1786	}
1787
1788	/* let's maintain fragments counter */
1789	if (left_is_free && right_is_free)
1790		e4b->bd_info->bb_fragments--;
1791	else if (!left_is_free && !right_is_free)
1792		e4b->bd_info->bb_fragments++;
1793
1794	/* buddy[0] == bd_bitmap is a special case, so handle
1795	 * it right away and let mb_buddy_mark_free stay free of
1796	 * zero order checks.
1797	 * Check if neighbours are to be coalesced,
1798	 * adjust bitmap bb_counters and borders appropriately.
1799	 */
1800	if (first & 1) {
1801		first += !left_is_free;
1802		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1803	}
1804	if (!(last & 1)) {
1805		last -= !right_is_free;
1806		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1807	}
1808
1809	if (first <= last)
1810		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1811
1812done:
1813	mb_set_largest_free_order(sb, e4b->bd_info);
1814	mb_update_avg_fragment_size(sb, e4b->bd_info);
1815	mb_check_buddy(e4b);
1816}
1817
1818static int mb_find_extent(struct ext4_buddy *e4b, int block,
1819				int needed, struct ext4_free_extent *ex)
1820{
1821	int next = block;
1822	int max, order;
 
1823	void *buddy;
1824
1825	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1826	BUG_ON(ex == NULL);
1827
1828	buddy = mb_find_buddy(e4b, 0, &max);
1829	BUG_ON(buddy == NULL);
1830	BUG_ON(block >= max);
1831	if (mb_test_bit(block, buddy)) {
1832		ex->fe_len = 0;
1833		ex->fe_start = 0;
1834		ex->fe_group = 0;
1835		return 0;
1836	}
1837
1838	/* find actual order */
1839	order = mb_find_order_for_block(e4b, block);
1840	block = block >> order;
1841
1842	ex->fe_len = 1 << order;
1843	ex->fe_start = block << order;
1844	ex->fe_group = e4b->bd_group;
1845
1846	/* calc difference from given start */
1847	next = next - ex->fe_start;
1848	ex->fe_len -= next;
1849	ex->fe_start += next;
1850
1851	while (needed > ex->fe_len &&
1852	       mb_find_buddy(e4b, order, &max)) {
1853
1854		if (block + 1 >= max)
1855			break;
1856
1857		next = (block + 1) * (1 << order);
1858		if (mb_test_bit(next, e4b->bd_bitmap))
1859			break;
1860
1861		order = mb_find_order_for_block(e4b, next);
1862
 
1863		block = next >> order;
1864		ex->fe_len += 1 << order;
1865	}
1866
1867	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1868		/* Should never happen! (but apparently sometimes does?!?) */
1869		WARN_ON(1);
1870		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1871			"corruption or bug in mb_find_extent "
1872			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1873			block, order, needed, ex->fe_group, ex->fe_start,
1874			ex->fe_len, ex->fe_logical);
1875		ex->fe_len = 0;
1876		ex->fe_start = 0;
1877		ex->fe_group = 0;
1878	}
1879	return ex->fe_len;
1880}
1881
1882static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1883{
1884	int ord;
1885	int mlen = 0;
1886	int max = 0;
1887	int cur;
1888	int start = ex->fe_start;
1889	int len = ex->fe_len;
1890	unsigned ret = 0;
1891	int len0 = len;
1892	void *buddy;
1893	bool split = false;
1894
1895	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1896	BUG_ON(e4b->bd_group != ex->fe_group);
1897	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1898	mb_check_buddy(e4b);
1899	mb_mark_used_double(e4b, start, len);
1900
1901	this_cpu_inc(discard_pa_seq);
1902	e4b->bd_info->bb_free -= len;
1903	if (e4b->bd_info->bb_first_free == start)
1904		e4b->bd_info->bb_first_free += len;
1905
1906	/* let's maintain fragments counter */
1907	if (start != 0)
1908		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1909	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1910		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1911	if (mlen && max)
1912		e4b->bd_info->bb_fragments++;
1913	else if (!mlen && !max)
1914		e4b->bd_info->bb_fragments--;
1915
1916	/* let's maintain buddy itself */
1917	while (len) {
1918		if (!split)
1919			ord = mb_find_order_for_block(e4b, start);
1920
1921		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1922			/* the whole chunk may be allocated at once! */
1923			mlen = 1 << ord;
1924			if (!split)
1925				buddy = mb_find_buddy(e4b, ord, &max);
1926			else
1927				split = false;
1928			BUG_ON((start >> ord) >= max);
1929			mb_set_bit(start >> ord, buddy);
1930			e4b->bd_info->bb_counters[ord]--;
1931			start += mlen;
1932			len -= mlen;
1933			BUG_ON(len < 0);
1934			continue;
1935		}
1936
1937		/* store for history */
1938		if (ret == 0)
1939			ret = len | (ord << 16);
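		/*
		 * The packed value is decoded in ext4_mb_use_best_found():
		 * the low 16 bits (the length still unallocated when the
		 * first split was needed) become ac_tail, and the order of
		 * the buddy that had to be split becomes ac_buddy.
		 */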
1940
1941		/* we have to split large buddy */
1942		BUG_ON(ord <= 0);
1943		buddy = mb_find_buddy(e4b, ord, &max);
1944		mb_set_bit(start >> ord, buddy);
1945		e4b->bd_info->bb_counters[ord]--;
1946
1947		ord--;
1948		cur = (start >> ord) & ~1U;
1949		buddy = mb_find_buddy(e4b, ord, &max);
1950		mb_clear_bit(cur, buddy);
1951		mb_clear_bit(cur + 1, buddy);
1952		e4b->bd_info->bb_counters[ord]++;
1953		e4b->bd_info->bb_counters[ord]++;
1954		split = true;
1955	}
1956	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1957
1958	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1959	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1960	mb_check_buddy(e4b);
1961
1962	return ret;
1963}
1964
1965/*
1966 * Must be called under group lock!
1967 */
1968static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1969					struct ext4_buddy *e4b)
1970{
1971	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1972	int ret;
1973
1974	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1975	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1976
1977	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1978	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1979	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1980
1981	/* preallocation can change ac_b_ex, thus we store actually
1982	 * allocated blocks for history */
1983	ac->ac_f_ex = ac->ac_b_ex;
1984
1985	ac->ac_status = AC_STATUS_FOUND;
1986	ac->ac_tail = ret & 0xffff;
1987	ac->ac_buddy = ret >> 16;
1988
1989	/*
1990	 * take the page reference. We want the page to be pinned
1991	 * so that we don't get an ext4_mb_init_cache() call for this
1992	 * group until we update the bitmap. That would mean we
1993	 * double allocate blocks. The reference is dropped
1994	 * in ext4_mb_release_context
1995	 */
1996	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1997	get_page(ac->ac_bitmap_page);
1998	ac->ac_buddy_page = e4b->bd_buddy_page;
1999	get_page(ac->ac_buddy_page);
2000	/* store last allocated for subsequent stream allocation */
2001	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2002		spin_lock(&sbi->s_md_lock);
2003		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2004		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2005		spin_unlock(&sbi->s_md_lock);
2006	}
2007	/*
2008	 * As we've just preallocated more space than
2009	 * the user originally requested, we store the allocated
2010	 * space in a special descriptor.
2011	 */
2012	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2013		ext4_mb_new_preallocation(ac);
2014
2015}
2016
2017static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2018					struct ext4_buddy *e4b,
2019					int finish_group)
2020{
2021	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2022	struct ext4_free_extent *bex = &ac->ac_b_ex;
2023	struct ext4_free_extent *gex = &ac->ac_g_ex;
2024	struct ext4_free_extent ex;
2025	int max;
2026
2027	if (ac->ac_status == AC_STATUS_FOUND)
2028		return;
2029	/*
2030	 * We don't want to scan for a whole year
2031	 */
2032	if (ac->ac_found > sbi->s_mb_max_to_scan &&
2033			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2034		ac->ac_status = AC_STATUS_BREAK;
2035		return;
2036	}
2037
2038	/*
2039	 * Haven't found good chunk so far, let's continue
2040	 */
2041	if (bex->fe_len < gex->fe_len)
2042		return;
2043
2044	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2045			&& bex->fe_group == e4b->bd_group) {
2046		/* recheck chunk's availability - we don't know
2047		 * when it was found (within this lock-unlock
2048		 * period or not) */
2049		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2050		if (max >= gex->fe_len) {
2051			ext4_mb_use_best_found(ac, e4b);
2052			return;
2053		}
2054	}
2055}
2056
2057/*
2058 * The routine checks whether the found extent is good enough. If it is,
2059 * then the extent gets marked used and a flag is set in the context
2060 * to stop scanning. Otherwise, the extent is compared with the
2061 * previously found extent and, if the new one is better, it is stored
2062 * in the context. Later, the best found extent will be used if
2063 * mballoc can't find a good enough extent.
2064 *
2065 * FIXME: real allocation policy is to be designed yet!
2066 */
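/*
 * For example, with a goal of 8 clusters: an 8-cluster extent is taken
 * immediately; while the best extent found so far is smaller than 8, any
 * larger extent replaces it; once an extent larger than the goal has been
 * found, a smaller extent that still exceeds the goal is preferred, so as
 * little as possible of a large free chunk is consumed.
 */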
2067static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2068					struct ext4_free_extent *ex,
2069					struct ext4_buddy *e4b)
2070{
2071	struct ext4_free_extent *bex = &ac->ac_b_ex;
2072	struct ext4_free_extent *gex = &ac->ac_g_ex;
2073
2074	BUG_ON(ex->fe_len <= 0);
2075	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2076	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2077	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2078
2079	ac->ac_found++;
2080
2081	/*
2082	 * The special case - take what you catch first
2083	 */
2084	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2085		*bex = *ex;
2086		ext4_mb_use_best_found(ac, e4b);
2087		return;
2088	}
2089
2090	/*
2091	 * Let's check whether the chunk is good enough
2092	 */
2093	if (ex->fe_len == gex->fe_len) {
2094		*bex = *ex;
2095		ext4_mb_use_best_found(ac, e4b);
2096		return;
2097	}
2098
2099	/*
2100	 * If this is the first found extent, just store it in the context
2101	 */
2102	if (bex->fe_len == 0) {
2103		*bex = *ex;
2104		return;
2105	}
2106
2107	/*
2108	 * If the newly found extent is better, store it in the context
2109	 */
2110	if (bex->fe_len < gex->fe_len) {
2111		/* if the request isn't satisfied, any found extent
2112		 * larger than the previous best one is better */
2113		if (ex->fe_len > bex->fe_len)
2114			*bex = *ex;
2115	} else if (ex->fe_len > gex->fe_len) {
2116		/* if the request is satisfied, then we try to find
2117		 * an extent that still satisfies the request, but is
2118		 * smaller than the previous one */
2119		if (ex->fe_len < bex->fe_len)
2120			*bex = *ex;
2121	}
2122
2123	ext4_mb_check_limits(ac, e4b, 0);
2124}
2125
2126static noinline_for_stack
2127int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2128					struct ext4_buddy *e4b)
2129{
2130	struct ext4_free_extent ex = ac->ac_b_ex;
2131	ext4_group_t group = ex.fe_group;
2132	int max;
2133	int err;
2134
2135	BUG_ON(ex.fe_len <= 0);
2136	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2137	if (err)
2138		return err;
2139
2140	ext4_lock_group(ac->ac_sb, group);
2141	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2142
2143	if (max > 0) {
2144		ac->ac_b_ex = ex;
2145		ext4_mb_use_best_found(ac, e4b);
2146	}
2147
2148	ext4_unlock_group(ac->ac_sb, group);
2149	ext4_mb_unload_buddy(e4b);
2150
2151	return 0;
2152}
2153
2154static noinline_for_stack
2155int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2156				struct ext4_buddy *e4b)
2157{
2158	ext4_group_t group = ac->ac_g_ex.fe_group;
2159	int max;
2160	int err;
2161	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2162	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2163	struct ext4_free_extent ex;
2164
2165	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2166		return 0;
2167	if (grp->bb_free == 0)
2168		return 0;
2169
2170	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2171	if (err)
2172		return err;
2173
2174	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2175		ext4_mb_unload_buddy(e4b);
2176		return 0;
2177	}
2178
2179	ext4_lock_group(ac->ac_sb, group);
2180	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2181			     ac->ac_g_ex.fe_len, &ex);
2182	ex.fe_logical = 0xDEADFA11; /* debug value */
2183
2184	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2185		ext4_fsblk_t start;
2186
2187		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2188			ex.fe_start;
2189		/* use do_div to get remainder (would be 64-bit modulo) */
2190		if (do_div(start, sbi->s_stripe) == 0) {
2191			ac->ac_found++;
2192			ac->ac_b_ex = ex;
2193			ext4_mb_use_best_found(ac, e4b);
2194		}
2195	} else if (max >= ac->ac_g_ex.fe_len) {
2196		BUG_ON(ex.fe_len <= 0);
2197		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2198		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2199		ac->ac_found++;
2200		ac->ac_b_ex = ex;
2201		ext4_mb_use_best_found(ac, e4b);
2202	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2203		/* Sometimes, the caller may want to merge even a small
2204		 * number of blocks into an existing extent */
2205		BUG_ON(ex.fe_len <= 0);
2206		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2207		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2208		ac->ac_found++;
2209		ac->ac_b_ex = ex;
2210		ext4_mb_use_best_found(ac, e4b);
2211	}
2212	ext4_unlock_group(ac->ac_sb, group);
2213	ext4_mb_unload_buddy(e4b);
2214
2215	return 0;
2216}
2217
2218/*
2219 * The routine scans buddy structures (not the bitmap!) from the given order
2220 * up to the max order and tries to find a big enough chunk to satisfy the request
2221 */
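/*
 * For example, a 16-cluster (order-4) request starts at ac_2order = 4: if
 * bb_counters[4] is non-zero, mb_find_next_zero_bit() on the order-4 buddy
 * bitmap locates a free 16-cluster chunk and the request is satisfied
 * exactly; otherwise the scan moves on to order 5, 6, ... and any oversized
 * chunk found there is trimmed back to the goal length in
 * ext4_mb_use_best_found().
 */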
2222static noinline_for_stack
2223void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2224					struct ext4_buddy *e4b)
2225{
2226	struct super_block *sb = ac->ac_sb;
2227	struct ext4_group_info *grp = e4b->bd_info;
2228	void *buddy;
2229	int i;
2230	int k;
2231	int max;
2232
2233	BUG_ON(ac->ac_2order <= 0);
2234	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2235		if (grp->bb_counters[i] == 0)
2236			continue;
2237
2238		buddy = mb_find_buddy(e4b, i, &max);
2239		BUG_ON(buddy == NULL);
2240
2241		k = mb_find_next_zero_bit(buddy, max, 0);
2242		if (k >= max) {
2243			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2244				"%d free clusters of order %d. But found 0",
2245				grp->bb_counters[i], i);
2246			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2247					 e4b->bd_group,
2248					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2249			break;
2250		}
2251		ac->ac_found++;
2252
2253		ac->ac_b_ex.fe_len = 1 << i;
2254		ac->ac_b_ex.fe_start = k << i;
2255		ac->ac_b_ex.fe_group = e4b->bd_group;
2256
2257		ext4_mb_use_best_found(ac, e4b);
2258
2259		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2260
2261		if (EXT4_SB(sb)->s_mb_stats)
2262			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2263
2264		break;
2265	}
2266}
2267
2268/*
2269 * The routine scans the group and measures all found extents.
2270 * In order to optimize scanning, the caller must pass the number of
2271 * free blocks in the group, so the routine can know the upper limit.
2272 */
2273static noinline_for_stack
2274void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2275					struct ext4_buddy *e4b)
2276{
2277	struct super_block *sb = ac->ac_sb;
2278	void *bitmap = e4b->bd_bitmap;
2279	struct ext4_free_extent ex;
2280	int i;
2281	int free;
2282
2283	free = e4b->bd_info->bb_free;
2284	if (WARN_ON(free <= 0))
2285		return;
2286
2287	i = e4b->bd_info->bb_first_free;
2288
2289	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2290		i = mb_find_next_zero_bit(bitmap,
2291						EXT4_CLUSTERS_PER_GROUP(sb), i);
2292		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2293			/*
2294			 * If we have a corrupt bitmap, we won't find any
2295			 * free blocks even though the group info says we
2296			 * have free blocks
2297			 */
2298			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2299					"%d free clusters as per "
2300					"group info. But bitmap says 0",
2301					free);
2302			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2303					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2304			break;
2305		}
2306
2307		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2308		if (WARN_ON(ex.fe_len <= 0))
2309			break;
2310		if (free < ex.fe_len) {
2311			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2312					"%d free clusters as per "
2313					"group info. But got %d blocks",
2314					free, ex.fe_len);
2315			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2316					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2317			/*
2318			 * The number of free blocks differs. This mostly
2319			 * indicates that the bitmap is corrupt. So exit
2320			 * without claiming the space.
2321			 */
2322			break;
2323		}
2324		ex.fe_logical = 0xDEADC0DE; /* debug value */
2325		ext4_mb_measure_extent(ac, &ex, e4b);
2326
2327		i += ex.fe_len;
2328		free -= ex.fe_len;
2329	}
2330
2331	ext4_mb_check_limits(ac, e4b, 1);
2332}
2333
2334/*
2335 * This is a special case for storage like RAID5:
2336 * we try to find stripe-aligned chunks for requests that are a multiple of the stripe size
2337 */
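/*
 * For example, with s_stripe = 16 and first_group_block = 32770, the first
 * stripe-aligned block is 32784, i.e. i = 14 within the group; candidate
 * extents are then probed at i, i + 16, i + 32, and so on.
 */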
2338static noinline_for_stack
2339void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2340				 struct ext4_buddy *e4b)
2341{
2342	struct super_block *sb = ac->ac_sb;
2343	struct ext4_sb_info *sbi = EXT4_SB(sb);
2344	void *bitmap = e4b->bd_bitmap;
2345	struct ext4_free_extent ex;
2346	ext4_fsblk_t first_group_block;
2347	ext4_fsblk_t a;
2348	ext4_grpblk_t i;
2349	int max;
2350
2351	BUG_ON(sbi->s_stripe == 0);
2352
2353	/* find first stripe-aligned block in group */
2354	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2355
2356	a = first_group_block + sbi->s_stripe - 1;
2357	do_div(a, sbi->s_stripe);
2358	i = (a * sbi->s_stripe) - first_group_block;
2359
2360	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2361		if (!mb_test_bit(i, bitmap)) {
2362			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2363			if (max >= sbi->s_stripe) {
2364				ac->ac_found++;
2365				ex.fe_logical = 0xDEADF00D; /* debug value */
2366				ac->ac_b_ex = ex;
2367				ext4_mb_use_best_found(ac, e4b);
2368				break;
2369			}
2370		}
2371		i += sbi->s_stripe;
2372	}
2373}
2374
2375/*
2376 * This is also called BEFORE we load the buddy bitmap.
2377 * Returns either true or false, indicating whether the group is
2378 * suitable for the allocation or not.
2379 */
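/*
 * The four criteria get progressively less picky: cr=0 accepts a group only
 * if it has enough free clusters and a free buddy chunk of at least the
 * requested order, cr=1 requires the average fragment (free / fragments) to
 * cover the goal, cr=2 only requires enough free clusters in total, and
 * cr=3 takes any group that has free space at all.
 */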
2380static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2381				ext4_group_t group, int cr)
2382{
2383	ext4_grpblk_t free, fragments;
2384	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2385	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2386
2387	BUG_ON(cr < 0 || cr >= 4);
2388
2389	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2390		return false;
2391
2392	free = grp->bb_free;
 
2393	if (free == 0)
2394		return false;
2395
2396	fragments = grp->bb_fragments;
2397	if (fragments == 0)
2398		return false;
2399
2400	switch (cr) {
2401	case 0:
2402		BUG_ON(ac->ac_2order == 0);
2403
2404		/* Avoid using the first bg of a flexgroup for data files */
2405		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2406		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2407		    ((group % flex_size) == 0))
2408			return false;
2409
2410		if (free < ac->ac_g_ex.fe_len)
2411			return false;
2412
2413		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2414			return true;
2415
2416		if (grp->bb_largest_free_order < ac->ac_2order)
2417			return false;
2418
2419		return true;
2420	case 1:
2421		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2422			return true;
2423		break;
2424	case 2:
2425		if (free >= ac->ac_g_ex.fe_len)
2426			return true;
2427		break;
2428	case 3:
2429		return true;
2430	default:
2431		BUG();
2432	}
2433
2434	return false;
2435}
2436
2437/*
2438 * This could return negative error code if something goes wrong
2439 * during ext4_mb_init_group(). This should not be called with
2440 * ext4_lock_group() held.
2441 *
2442 * Note: because we are conditionally operating with the group lock in
2443 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2444 * function using __acquire and __release.  This means we need to be
2445 * super careful before messing with the error path handling via "goto
2446 * out"!
2447 */
2448static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2449				     ext4_group_t group, int cr)
2450{
2451	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2452	struct super_block *sb = ac->ac_sb;
2453	struct ext4_sb_info *sbi = EXT4_SB(sb);
2454	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2455	ext4_grpblk_t free;
2456	int ret = 0;
2457
2458	if (sbi->s_mb_stats)
2459		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2460	if (should_lock) {
2461		ext4_lock_group(sb, group);
2462		__release(ext4_group_lock_ptr(sb, group));
2463	}
2464	free = grp->bb_free;
2465	if (free == 0)
2466		goto out;
2467	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2468		goto out;
2469	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2470		goto out;
2471	if (should_lock) {
2472		__acquire(ext4_group_lock_ptr(sb, group));
2473		ext4_unlock_group(sb, group);
2474	}
2475
2476	/* We only do this if the grp has never been initialized */
2477	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2478		struct ext4_group_desc *gdp =
2479			ext4_get_group_desc(sb, group, NULL);
2480		int ret;
2481
2482		/* cr=0/1 is a very optimistic search to find large
2483		 * good chunks almost for free.  If buddy data is not
2484		 * ready, then this optimization makes no sense.  But
2485		 * we never skip the first block group in a flex_bg,
2486		 * since this gets used for metadata block allocation,
2487		 * and we want to make sure we locate metadata blocks
2488		 * in the first block group in the flex_bg if possible.
2489		 */
2490		if (cr < 2 &&
2491		    (!sbi->s_log_groups_per_flex ||
2492		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2493		    !(ext4_has_group_desc_csum(sb) &&
2494		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2495			return 0;
2496		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2497		if (ret)
2498			return ret;
2499	}
2500
2501	if (should_lock) {
2502		ext4_lock_group(sb, group);
2503		__release(ext4_group_lock_ptr(sb, group));
2504	}
2505	ret = ext4_mb_good_group(ac, group, cr);
2506out:
2507	if (should_lock) {
2508		__acquire(ext4_group_lock_ptr(sb, group));
2509		ext4_unlock_group(sb, group);
2510	}
2511	return ret;
2512}
2513
2514/*
2515 * Start prefetching @nr block bitmaps starting at @group.
2516 * Return the next group which needs to be prefetched.
2517 */
2518ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2519			      unsigned int nr, int *cnt)
2520{
2521	ext4_group_t ngroups = ext4_get_groups_count(sb);
2522	struct buffer_head *bh;
2523	struct blk_plug plug;
2524
2525	blk_start_plug(&plug);
2526	while (nr-- > 0) {
2527		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2528								  NULL);
2529		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2530
2531		/*
2532		 * Prefetch block groups with free blocks; but don't
2533		 * bother if it is marked uninitialized on disk, since
2534		 * it won't require I/O to read.  Also only try to
2535		 * prefetch once, so we avoid the getblk() call, which can
2536		 * be expensive.
2537		 */
2538		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2539		    EXT4_MB_GRP_NEED_INIT(grp) &&
2540		    ext4_free_group_clusters(sb, gdp) > 0 &&
2541		    !(ext4_has_group_desc_csum(sb) &&
2542		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2543			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2544			if (bh && !IS_ERR(bh)) {
2545				if (!buffer_uptodate(bh) && cnt)
2546					(*cnt)++;
2547				brelse(bh);
2548			}
2549		}
2550		if (++group >= ngroups)
2551			group = 0;
2552	}
2553	blk_finish_plug(&plug);
2554	return group;
2555}
2556
2557/*
2558 * Prefetching reads the block bitmap into the buffer cache; but we
2559 * need to make sure that the buddy bitmap in the page cache has been
2560 * initialized.  Note that ext4_mb_init_group() will block if the I/O
2561 * is not yet completed, or indeed if the I/O was never initiated
2562 * because ext4_mb_prefetch() did not start it.
2563 *
2564 * TODO: We should actually kick off the buddy bitmap setup in a work
2565 * queue when the buffer I/O is completed, so that we don't block
2566 * waiting for the block allocation bitmap read to finish when
2567 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2568 */
2569void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2570			   unsigned int nr)
2571{
2572	while (nr-- > 0) {
2573		struct ext4_group_desc *gdp;
2574		struct ext4_group_info *grp;
2575
2576		if (!group)
2577			group = ext4_get_groups_count(sb);
2578		group--;
2579		gdp = ext4_get_group_desc(sb, group, NULL);
2580		grp = ext4_get_group_info(sb, group);
2581
2582		if (EXT4_MB_GRP_NEED_INIT(grp) &&
2583		    ext4_free_group_clusters(sb, gdp) > 0 &&
2584		    !(ext4_has_group_desc_csum(sb) &&
2585		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2586			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2587				break;
2588		}
2589	}
2590}
2591
2592static noinline_for_stack int
2593ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2594{
2595	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2596	int cr = -1, new_cr;
2597	int err = 0, first_err = 0;
2598	unsigned int nr = 0, prefetch_ios = 0;
2599	struct ext4_sb_info *sbi;
2600	struct super_block *sb;
2601	struct ext4_buddy e4b;
2602	int lost;
2603
2604	sb = ac->ac_sb;
2605	sbi = EXT4_SB(sb);
2606	ngroups = ext4_get_groups_count(sb);
2607	/* non-extent files are limited to low blocks/groups */
2608	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2609		ngroups = sbi->s_blockfile_groups;
2610
2611	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2612
2613	/* first, try the goal */
2614	err = ext4_mb_find_by_goal(ac, &e4b);
2615	if (err || ac->ac_status == AC_STATUS_FOUND)
2616		goto out;
2617
2618	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2619		goto out;
2620
2621	/*
2622	 * ac->ac_2order is set only if the fe_len is a power of 2
2623	 * if ac->ac_2order is set we also set criteria to 0 so that we
2624	 * try exact allocation using buddy.
2625	 */
2626	i = fls(ac->ac_g_ex.fe_len);
2627	ac->ac_2order = 0;
2628	/*
2629	 * We search using buddy data only if the order of the request
2630	 * is greater than or equal to sbi->s_mb_order2_reqs.
2631	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2632	 * We also support searching for power-of-two requests only for
2633	 * requests up to the maximum buddy size we have constructed.
2634	 */
2635	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2636		/*
2637		 * This should tell us if fe_len is exactly a power of 2
2638		 */
2639		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2640			ac->ac_2order = array_index_nospec(i - 1,
2641							   MB_NUM_ORDERS(sb));
2642	}
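	/*
	 * For example, a request for 64 clusters has fls() == 7 and
	 * 64 & ~(1 << 6) == 0, so ac_2order becomes 6; a request for 48
	 * clusters (fls() == 6, 48 & ~(1 << 5) == 16) leaves ac_2order at 0
	 * and the cr=0 buddy-only scan below is skipped.
	 */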
2643
2644	/* if stream allocation is enabled, use global goal */
2645	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2646		/* TBD: may be hot point */
2647		spin_lock(&sbi->s_md_lock);
2648		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2649		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2650		spin_unlock(&sbi->s_md_lock);
2651	}
2652
2653	/* Let's just scan groups to find more or less suitable blocks */
2654	cr = ac->ac_2order ? 0 : 1;
2655	/*
2656	 * cr == 0 try to get exact allocation,
2657	 * cr == 3  try to get anything
2658	 */
2659repeat:
2660	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2661		ac->ac_criteria = cr;
2662		/*
2663		 * searching for the right group start
2664		 * from the goal value specified
2665		 */
2666		group = ac->ac_g_ex.fe_group;
2667		ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2668		prefetch_grp = group;
2669
2670		for (i = 0, new_cr = cr; i < ngroups; i++,
2671		     ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2672			int ret = 0;
2673
2674			cond_resched();
2675			if (new_cr != cr) {
2676				cr = new_cr;
2677				goto repeat;
2678			}
2679
2680			/*
2681			 * Batch reads of the block allocation bitmaps
2682			 * to get multiple READs in flight; limit
2683			 * prefetching at cr=0/1, otherwise mballoc can
2684			 * spend a lot of time loading imperfect groups
2685			 */
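			/*
			 * With flex_bg, nr is clamped to the groups left in
			 * the current flex group, e.g. with 16 groups per
			 * flex group and group = 35, nr = 16 - (35 & 15) = 13
			 * (further capped by s_mb_prefetch), so one batch
			 * never spans a flex group boundary.
			 */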
2686			if ((prefetch_grp == group) &&
2687			    (cr > 1 ||
2688			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2689				unsigned int curr_ios = prefetch_ios;
2690
2691				nr = sbi->s_mb_prefetch;
2692				if (ext4_has_feature_flex_bg(sb)) {
2693					nr = 1 << sbi->s_log_groups_per_flex;
2694					nr -= group & (nr - 1);
2695					nr = min(nr, sbi->s_mb_prefetch);
2696				}
2697				prefetch_grp = ext4_mb_prefetch(sb, group,
2698							nr, &prefetch_ios);
2699				if (prefetch_ios == curr_ios)
2700					nr = 0;
2701			}
2702
2703			/* This now checks without needing the buddy page */
2704			ret = ext4_mb_good_group_nolock(ac, group, cr);
2705			if (ret <= 0) {
2706				if (!first_err)
2707					first_err = ret;
2708				continue;
2709			}
2710
2711			err = ext4_mb_load_buddy(sb, group, &e4b);
2712			if (err)
2713				goto out;
2714
2715			ext4_lock_group(sb, group);
2716
2717			/*
2718			 * We need to check again after locking the
2719			 * block group
2720			 */
2721			ret = ext4_mb_good_group(ac, group, cr);
2722			if (ret == 0) {
2723				ext4_unlock_group(sb, group);
2724				ext4_mb_unload_buddy(&e4b);
2725				continue;
2726			}
2727
2728			ac->ac_groups_scanned++;
2729			if (cr == 0)
2730				ext4_mb_simple_scan_group(ac, &e4b);
2731			else if (cr == 1 && sbi->s_stripe &&
2732					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2733				ext4_mb_scan_aligned(ac, &e4b);
2734			else
2735				ext4_mb_complex_scan_group(ac, &e4b);
2736
2737			ext4_unlock_group(sb, group);
2738			ext4_mb_unload_buddy(&e4b);
2739
2740			if (ac->ac_status != AC_STATUS_CONTINUE)
2741				break;
2742		}
2743		/* Processed all groups and haven't found blocks */
2744		if (sbi->s_mb_stats && i == ngroups)
2745			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2746	}
2747
2748	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2749	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2750		/*
2751		 * We've been searching too long. Let's try to allocate
2752		 * the best chunk we've found so far
2753		 */
 
2754		ext4_mb_try_best_found(ac, &e4b);
2755		if (ac->ac_status != AC_STATUS_FOUND) {
2756			/*
2757			 * Someone luckier has already allocated it.
2758			 * The only thing we can do is just take the
2759			 * first found block(s).
2760			 */
2761			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2762			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2763				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2764				 ac->ac_b_ex.fe_len, lost);
2765
2766			ac->ac_b_ex.fe_group = 0;
2767			ac->ac_b_ex.fe_start = 0;
2768			ac->ac_b_ex.fe_len = 0;
2769			ac->ac_status = AC_STATUS_CONTINUE;
2770			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2771			cr = 3;
 
2772			goto repeat;
2773		}
2774	}
2775
2776	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2777		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2778out:
2779	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2780		err = first_err;
2781
2782	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2783		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2784		 ac->ac_flags, cr, err);
2785
2786	if (nr)
2787		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2788
2789	return err;
2790}
2791
2792static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2793{
2794	struct super_block *sb = pde_data(file_inode(seq->file));
2795	ext4_group_t group;
2796
2797	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2798		return NULL;
2799	group = *pos + 1;
2800	return (void *) ((unsigned long) group);
2801}
2802
2803static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2804{
2805	struct super_block *sb = pde_data(file_inode(seq->file));
2806	ext4_group_t group;
2807
2808	++*pos;
2809	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2810		return NULL;
2811	group = *pos + 1;
2812	return (void *) ((unsigned long) group);
2813}
2814
2815static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2816{
2817	struct super_block *sb = pde_data(file_inode(seq->file));
2818	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2819	int i;
2820	int err, buddy_loaded = 0;
2821	struct ext4_buddy e4b;
2822	struct ext4_group_info *grinfo;
2823	unsigned char blocksize_bits = min_t(unsigned char,
2824					     sb->s_blocksize_bits,
2825					     EXT4_MAX_BLOCK_LOG_SIZE);
2826	struct sg {
2827		struct ext4_group_info info;
2828		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2829	} sg;
2830
2831	group--;
2832	if (group == 0)
2833		seq_puts(seq, "#group: free  frags first ["
2834			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2835			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2836
2837	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2838		sizeof(struct ext4_group_info);
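	/*
	 * This is the size of the group info together with its trailing
	 * bb_counters[] for orders 0 .. blocksize_bits + 1; the memcpy()
	 * below snapshots that much into the on-stack sg so the counters
	 * can be printed after the buddy is unloaded.
	 */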
2839
2840	grinfo = ext4_get_group_info(sb, group);
2841	/* Load the group info in memory only if not already loaded. */
2842	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2843		err = ext4_mb_load_buddy(sb, group, &e4b);
2844		if (err) {
2845			seq_printf(seq, "#%-5u: I/O error\n", group);
2846			return 0;
2847		}
2848		buddy_loaded = 1;
2849	}
2850
2851	memcpy(&sg, ext4_get_group_info(sb, group), i);
2852
2853	if (buddy_loaded)
2854		ext4_mb_unload_buddy(&e4b);
2855
2856	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2857			sg.info.bb_fragments, sg.info.bb_first_free);
2858	for (i = 0; i <= 13; i++)
2859		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2860				sg.info.bb_counters[i] : 0);
2861	seq_puts(seq, " ]\n");
2862
2863	return 0;
2864}
2865
2866static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2867{
2868}
2869
2870const struct seq_operations ext4_mb_seq_groups_ops = {
2871	.start  = ext4_mb_seq_groups_start,
2872	.next   = ext4_mb_seq_groups_next,
2873	.stop   = ext4_mb_seq_groups_stop,
2874	.show   = ext4_mb_seq_groups_show,
2875};
2876
2877int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2878{
2879	struct super_block *sb = seq->private;
2880	struct ext4_sb_info *sbi = EXT4_SB(sb);
2881
2882	seq_puts(seq, "mballoc:\n");
2883	if (!sbi->s_mb_stats) {
2884		seq_puts(seq, "\tmb stats collection turned off.\n");
2885		seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2886		return 0;
2887	}
2888	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2889	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2890
2891	seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2892
2893	seq_puts(seq, "\tcr0_stats:\n");
2894	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2895	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2896		   atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2897	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2898		   atomic64_read(&sbi->s_bal_cX_failed[0]));
2899	seq_printf(seq, "\t\tbad_suggestions: %u\n",
2900		   atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2901
2902	seq_puts(seq, "\tcr1_stats:\n");
2903	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2904	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2905		   atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2906	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2907		   atomic64_read(&sbi->s_bal_cX_failed[1]));
2908	seq_printf(seq, "\t\tbad_suggestions: %u\n",
2909		   atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2910
2911	seq_puts(seq, "\tcr2_stats:\n");
2912	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2913	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2914		   atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2915	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2916		   atomic64_read(&sbi->s_bal_cX_failed[2]));
2917
2918	seq_puts(seq, "\tcr3_stats:\n");
2919	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2920	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2921		   atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2922	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2923		   atomic64_read(&sbi->s_bal_cX_failed[3]));
2924	seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2925	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2926	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2927	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2928	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2929
2930	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2931		   atomic_read(&sbi->s_mb_buddies_generated),
2932		   ext4_get_groups_count(sb));
2933	seq_printf(seq, "\tbuddies_time_used: %llu\n",
2934		   atomic64_read(&sbi->s_mb_generation_time));
2935	seq_printf(seq, "\tpreallocated: %u\n",
2936		   atomic_read(&sbi->s_mb_preallocated));
2937	seq_printf(seq, "\tdiscarded: %u\n",
2938		   atomic_read(&sbi->s_mb_discarded));
2939	return 0;
2940}
2941
2942static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2943__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
2944{
2945	struct super_block *sb = pde_data(file_inode(seq->file));
2946	unsigned long position;
2947
2948	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2949		return NULL;
2950	position = *pos + 1;
2951	return (void *) ((unsigned long) position);
2952}
2953
2954static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
2955{
2956	struct super_block *sb = pde_data(file_inode(seq->file));
2957	unsigned long position;
2958
2959	++*pos;
2960	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2961		return NULL;
2962	position = *pos + 1;
2963	return (void *) ((unsigned long) position);
2964}
2965
2966static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
2967{
2968	struct super_block *sb = pde_data(file_inode(seq->file));
2969	struct ext4_sb_info *sbi = EXT4_SB(sb);
2970	unsigned long position = ((unsigned long) v);
2971	struct ext4_group_info *grp;
2972	unsigned int count;
2973
2974	position--;
2975	if (position >= MB_NUM_ORDERS(sb)) {
2976		position -= MB_NUM_ORDERS(sb);
2977		if (position == 0)
2978			seq_puts(seq, "avg_fragment_size_lists:\n");
2979
2980		count = 0;
2981		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
2982		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
2983				    bb_avg_fragment_size_node)
2984			count++;
2985		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
2986		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
2987					(unsigned int)position, count);
2988		return 0;
2989	}
2990
2991	if (position == 0) {
2992		seq_printf(seq, "optimize_scan: %d\n",
2993			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
2994		seq_puts(seq, "max_free_order_lists:\n");
2995	}
2996	count = 0;
2997	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
2998	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
2999			    bb_largest_free_order_node)
3000		count++;
3001	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3002	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3003		   (unsigned int)position, count);
3004
3005	return 0;
3006}
3007
3008static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3009{
3010}
3011
3012const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3013	.start  = ext4_mb_seq_structs_summary_start,
3014	.next   = ext4_mb_seq_structs_summary_next,
3015	.stop   = ext4_mb_seq_structs_summary_stop,
3016	.show   = ext4_mb_seq_structs_summary_show,
3017};
3018
3019static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3020{
3021	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3022	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3023
3024	BUG_ON(!cachep);
3025	return cachep;
3026}
3027
3028/*
3029 * Allocate the top-level s_group_info array for the specified number
3030 * of groups
3031 */
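/*
 * s_group_info is a two-level array: each top-level slot points to one
 * block's worth of ext4_group_info pointers, i.e. EXT4_DESC_PER_BLOCK(sb)
 * groups per slot (for instance, 128 groups per slot on a 4k filesystem
 * with 32-byte group descriptors).
 */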
3032int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3033{
3034	struct ext4_sb_info *sbi = EXT4_SB(sb);
3035	unsigned size;
3036	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3037
3038	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3039		EXT4_DESC_PER_BLOCK_BITS(sb);
3040	if (size <= sbi->s_group_info_size)
3041		return 0;
3042
3043	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3044	new_groupinfo = kvzalloc(size, GFP_KERNEL);
3045	if (!new_groupinfo) {
3046		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3047		return -ENOMEM;
3048	}
3049	rcu_read_lock();
3050	old_groupinfo = rcu_dereference(sbi->s_group_info);
3051	if (old_groupinfo)
3052		memcpy(new_groupinfo, old_groupinfo,
3053		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3054	rcu_read_unlock();
3055	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3056	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3057	if (old_groupinfo)
3058		ext4_kvfree_array_rcu(old_groupinfo);
3059	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3060		   sbi->s_group_info_size);
3061	return 0;
3062}
3063
3064/* Create and initialize ext4_group_info data for the given group. */
3065int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3066			  struct ext4_group_desc *desc)
3067{
3068	int i;
3069	int metalen = 0;
3070	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3071	struct ext4_sb_info *sbi = EXT4_SB(sb);
3072	struct ext4_group_info **meta_group_info;
3073	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3074
3075	/*
3076	 * First check if this group is the first of a reserved block.
3077	 * If it's true, we have to allocate a new table of pointers
3078	 * to ext4_group_info structures
3079	 */
3080	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3081		metalen = sizeof(*meta_group_info) <<
3082			EXT4_DESC_PER_BLOCK_BITS(sb);
3083		meta_group_info = kmalloc(metalen, GFP_NOFS);
3084		if (meta_group_info == NULL) {
3085			ext4_msg(sb, KERN_ERR, "can't allocate mem "
3086				 "for a buddy group");
3087			goto exit_meta_group_info;
3088		}
3089		rcu_read_lock();
3090		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3091		rcu_read_unlock();
3092	}
3093
3094	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
 
3095	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3096
3097	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3098	if (meta_group_info[i] == NULL) {
3099		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3100		goto exit_group_info;
3101	}
 
3102	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3103		&(meta_group_info[i]->bb_state));
3104
3105	/*
3106	 * initialize bb_free to be able to skip
3107	 * empty groups without initialization
3108	 */
3109	if (ext4_has_group_desc_csum(sb) &&
3110	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3111		meta_group_info[i]->bb_free =
3112			ext4_free_clusters_after_init(sb, group, desc);
3113	} else {
3114		meta_group_info[i]->bb_free =
3115			ext4_free_group_clusters(sb, desc);
3116	}
3117
3118	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3119	init_rwsem(&meta_group_info[i]->alloc_sem);
3120	meta_group_info[i]->bb_free_root = RB_ROOT;
3121	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3122	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3123	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
3124	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
3125	meta_group_info[i]->bb_group = group;
3126
3127	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3128	return 0;
3129
3130exit_group_info:
3131	/* If a meta_group_info table has been allocated, release it now */
3132	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3133		struct ext4_group_info ***group_info;
3134
3135		rcu_read_lock();
3136		group_info = rcu_dereference(sbi->s_group_info);
3137		kfree(group_info[idx]);
3138		group_info[idx] = NULL;
3139		rcu_read_unlock();
3140	}
3141exit_meta_group_info:
3142	return -ENOMEM;
3143} /* ext4_mb_add_groupinfo */
3144
3145static int ext4_mb_init_backend(struct super_block *sb)
3146{
3147	ext4_group_t ngroups = ext4_get_groups_count(sb);
3148	ext4_group_t i;
3149	struct ext4_sb_info *sbi = EXT4_SB(sb);
3150	int err;
3151	struct ext4_group_desc *desc;
3152	struct ext4_group_info ***group_info;
3153	struct kmem_cache *cachep;
3154
3155	err = ext4_mb_alloc_groupinfo(sb, ngroups);
3156	if (err)
3157		return err;
3158
3159	sbi->s_buddy_cache = new_inode(sb);
3160	if (sbi->s_buddy_cache == NULL) {
3161		ext4_msg(sb, KERN_ERR, "can't get new inode");
3162		goto err_freesgi;
3163	}
3164	/* To avoid potentially colliding with a valid on-disk inode number,
3165	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
3166	 * not in the inode hash, so it should never be found by iget(), but
3167	 * this will avoid confusion if it ever shows up during debugging. */
3168	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3169	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3170	for (i = 0; i < ngroups; i++) {
3171		cond_resched();
3172		desc = ext4_get_group_desc(sb, i, NULL);
3173		if (desc == NULL) {
3174			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3175			goto err_freebuddy;
3176		}
3177		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3178			goto err_freebuddy;
3179	}
3180
3181	if (ext4_has_feature_flex_bg(sb)) {
3182		/* a single flex group is supposed to be read by a single IO.
3183		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
3184		 * an unsigned integer, so the maximum shift is 32.
3185		 */
3186		if (sbi->s_es->s_log_groups_per_flex >= 32) {
3187			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3188			goto err_freebuddy;
3189		}
3190		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3191			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3192		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3193	} else {
3194		sbi->s_mb_prefetch = 32;
3195	}
3196	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3197		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3198	/* how many real IOs to prefetch within a single allocation at cr=0;
3199	 * given cr=0 is a CPU-related optimization, we shouldn't try to
3200	 * load too many groups, at some point we should start to use what
3201	 * we've got in memory.
3202	 * with an average random access time of 5ms, it'd take a second to get
3203	 * 200 groups (* N with flex_bg), so let's make this limit 4
3204	 */
3205	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3206	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3207		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3208
3209	return 0;
3210
3211err_freebuddy:
3212	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3213	while (i-- > 0)
3214		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3215	i = sbi->s_group_info_size;
3216	rcu_read_lock();
3217	group_info = rcu_dereference(sbi->s_group_info);
3218	while (i-- > 0)
3219		kfree(group_info[i]);
3220	rcu_read_unlock();
3221	iput(sbi->s_buddy_cache);
3222err_freesgi:
3223	rcu_read_lock();
3224	kvfree(rcu_dereference(sbi->s_group_info));
3225	rcu_read_unlock();
3226	return -ENOMEM;
3227}
3228
3229static void ext4_groupinfo_destroy_slabs(void)
3230{
3231	int i;
3232
3233	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3234		kmem_cache_destroy(ext4_groupinfo_caches[i]);
 
3235		ext4_groupinfo_caches[i] = NULL;
3236	}
3237}
3238
3239static int ext4_groupinfo_create_slab(size_t size)
3240{
3241	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3242	int slab_size;
3243	int blocksize_bits = order_base_2(size);
3244	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3245	struct kmem_cache *cachep;
3246
3247	if (cache_index >= NR_GRPINFO_CACHES)
3248		return -EINVAL;
3249
3250	if (unlikely(cache_index < 0))
3251		cache_index = 0;
3252
3253	mutex_lock(&ext4_grpinfo_slab_create_mutex);
3254	if (ext4_groupinfo_caches[cache_index]) {
3255		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3256		return 0;	/* Already created */
3257	}
3258
3259	slab_size = offsetof(struct ext4_group_info,
3260				bb_counters[blocksize_bits + 2]);
3261
3262	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3263					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3264					NULL);
3265
3266	ext4_groupinfo_caches[cache_index] = cachep;
3267
3268	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3269	if (!cachep) {
3270		printk(KERN_EMERG
3271		       "EXT4-fs: no memory for groupinfo slab cache\n");
3272		return -ENOMEM;
3273	}
3274
3275	return 0;
3276}
3277
3278static void ext4_discard_work(struct work_struct *work)
3279{
3280	struct ext4_sb_info *sbi = container_of(work,
3281			struct ext4_sb_info, s_discard_work);
3282	struct super_block *sb = sbi->s_sb;
3283	struct ext4_free_data *fd, *nfd;
3284	struct ext4_buddy e4b;
3285	struct list_head discard_list;
3286	ext4_group_t grp, load_grp;
3287	int err = 0;
3288
3289	INIT_LIST_HEAD(&discard_list);
3290	spin_lock(&sbi->s_md_lock);
3291	list_splice_init(&sbi->s_discard_list, &discard_list);
3292	spin_unlock(&sbi->s_md_lock);
3293
3294	load_grp = UINT_MAX;
3295	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3296		/*
3297		 * If the filesystem is unmounting, we have no memory, or we
3298		 * are suffering from a lack of space, give up the discard
3299		 */
3300		if ((sb->s_flags & SB_ACTIVE) && !err &&
3301		    !atomic_read(&sbi->s_retry_alloc_pending)) {
3302			grp = fd->efd_group;
3303			if (grp != load_grp) {
3304				if (load_grp != UINT_MAX)
3305					ext4_mb_unload_buddy(&e4b);
3306
3307				err = ext4_mb_load_buddy(sb, grp, &e4b);
3308				if (err) {
3309					kmem_cache_free(ext4_free_data_cachep, fd);
3310					load_grp = UINT_MAX;
3311					continue;
3312				} else {
3313					load_grp = grp;
3314				}
3315			}
3316
3317			ext4_lock_group(sb, grp);
3318			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3319						fd->efd_start_cluster + fd->efd_count - 1, 1);
3320			ext4_unlock_group(sb, grp);
3321		}
3322		kmem_cache_free(ext4_free_data_cachep, fd);
3323	}
3324
3325	if (load_grp != UINT_MAX)
3326		ext4_mb_unload_buddy(&e4b);
3327}
3328
3329int ext4_mb_init(struct super_block *sb)
3330{
3331	struct ext4_sb_info *sbi = EXT4_SB(sb);
3332	unsigned i, j;
3333	unsigned offset, offset_incr;
3334	unsigned max;
3335	int ret;
3336
3337	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3338
3339	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3340	if (sbi->s_mb_offsets == NULL) {
3341		ret = -ENOMEM;
3342		goto out;
3343	}
3344
3345	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3346	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3347	if (sbi->s_mb_maxs == NULL) {
3348		ret = -ENOMEM;
3349		goto out;
3350	}
3351
3352	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3353	if (ret < 0)
3354		goto out;
3355
3356	/* order 0 is regular bitmap */
3357	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3358	sbi->s_mb_offsets[0] = 0;
3359
3360	i = 1;
3361	offset = 0;
3362	offset_incr = 1 << (sb->s_blocksize_bits - 1);
3363	max = sb->s_blocksize << 2;
3364	do {
3365		sbi->s_mb_offsets[i] = offset;
3366		sbi->s_mb_maxs[i] = max;
3367		offset += offset_incr;
3368		offset_incr = offset_incr >> 1;
3369		max = max >> 1;
3370		i++;
3371	} while (i < MB_NUM_ORDERS(sb));
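	/*
	 * For a 4k block size this gives s_mb_maxs[0] = 32768 bits (the
	 * on-disk block bitmap) and packs all higher-order buddy bitmaps
	 * into a single extra block: order 1 has 16384 bits at byte offset
	 * 0, order 2 has 8192 bits at offset 2048, order 3 has 4096 bits
	 * at offset 3072, and so on, halving each time.
	 */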
3372
3373	sbi->s_mb_avg_fragment_size =
3374		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3375			GFP_KERNEL);
3376	if (!sbi->s_mb_avg_fragment_size) {
3377		ret = -ENOMEM;
3378		goto out;
3379	}
3380	sbi->s_mb_avg_fragment_size_locks =
3381		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3382			GFP_KERNEL);
3383	if (!sbi->s_mb_avg_fragment_size_locks) {
3384		ret = -ENOMEM;
3385		goto out;
3386	}
3387	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3388		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3389		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3390	}
3391	sbi->s_mb_largest_free_orders =
3392		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3393			GFP_KERNEL);
3394	if (!sbi->s_mb_largest_free_orders) {
3395		ret = -ENOMEM;
3396		goto out;
3397	}
3398	sbi->s_mb_largest_free_orders_locks =
3399		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3400			GFP_KERNEL);
3401	if (!sbi->s_mb_largest_free_orders_locks) {
3402		ret = -ENOMEM;
3403		goto out;
3404	}
3405	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3406		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3407		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3408	}
3409
3410	spin_lock_init(&sbi->s_md_lock);
3411	sbi->s_mb_free_pending = 0;
3412	INIT_LIST_HEAD(&sbi->s_freed_data_list);
3413	INIT_LIST_HEAD(&sbi->s_discard_list);
3414	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3415	atomic_set(&sbi->s_retry_alloc_pending, 0);
3416
3417	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3418	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3419	sbi->s_mb_stats = MB_DEFAULT_STATS;
3420	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3421	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3422	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3423	/*
3424	 * The default group preallocation is 512, which for 4k block
3425	 * sizes translates to 2 megabytes.  However for bigalloc file
3426	 * systems, this is probably too big (i.e., if the cluster size
3427	 * is 1 megabyte, then group preallocation size becomes half a
3428	 * gigabyte!).  As a default, we will keep a two megabyte
3429	 * group prealloc size for cluster sizes up to 64k, and after
3430	 * that, we will force a minimum group preallocation size of
3431	 * 32 clusters.  This translates to 8 megs when the cluster
3432	 * size is 256k, and 32 megs when the cluster size is 1 meg,
3433	 * which seems reasonable as a default.
3434	 */
3435	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3436				       sbi->s_cluster_bits, 32);
3437	/*
3438	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3439	 * to the lowest multiple of s_stripe which is bigger than
3440	 * the s_mb_group_prealloc as determined above. We want
3441	 * the preallocation size to be an exact multiple of the
3442	 * RAID stripe size so that preallocations don't fragment
3443	 * the stripes.
3444	 */
3445	if (sbi->s_stripe > 1) {
3446		sbi->s_mb_group_prealloc = roundup(
3447			sbi->s_mb_group_prealloc, sbi->s_stripe);
3448	}
3449
3450	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3451	if (sbi->s_locality_groups == NULL) {
3452		ret = -ENOMEM;
3453		goto out;
3454	}
3455	for_each_possible_cpu(i) {
3456		struct ext4_locality_group *lg;
3457		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3458		mutex_init(&lg->lg_mutex);
3459		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3460			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3461		spin_lock_init(&lg->lg_prealloc_lock);
3462	}
3463
3464	if (bdev_nonrot(sb->s_bdev))
3465		sbi->s_mb_max_linear_groups = 0;
3466	else
3467		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3468	/* init file for buddy data */
3469	ret = ext4_mb_init_backend(sb);
3470	if (ret != 0)
3471		goto out_free_locality_groups;
 
3472
3473	return 0;
3474
3475out_free_locality_groups:
3476	free_percpu(sbi->s_locality_groups);
3477	sbi->s_locality_groups = NULL;
3478out:
3479	kfree(sbi->s_mb_avg_fragment_size);
3480	kfree(sbi->s_mb_avg_fragment_size_locks);
3481	kfree(sbi->s_mb_largest_free_orders);
3482	kfree(sbi->s_mb_largest_free_orders_locks);
3483	kfree(sbi->s_mb_offsets);
3484	sbi->s_mb_offsets = NULL;
3485	kfree(sbi->s_mb_maxs);
3486	sbi->s_mb_maxs = NULL;
3487	return ret;
3488}
3489
3490/* needs to be called with the ext4 group lock held */
3491static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3492{
3493	struct ext4_prealloc_space *pa;
3494	struct list_head *cur, *tmp;
3495	int count = 0;
3496
3497	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3498		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3499		list_del(&pa->pa_group_list);
3500		count++;
3501		kmem_cache_free(ext4_pspace_cachep, pa);
3502	}
3503	return count;
3504}
3505
3506int ext4_mb_release(struct super_block *sb)
3507{
3508	ext4_group_t ngroups = ext4_get_groups_count(sb);
3509	ext4_group_t i;
3510	int num_meta_group_infos;
3511	struct ext4_group_info *grinfo, ***group_info;
3512	struct ext4_sb_info *sbi = EXT4_SB(sb);
3513	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3514	int count;
3515
3516	if (test_opt(sb, DISCARD)) {
3517		/*
3518		 * wait for the discard work to drain all of the ext4_free_data entries
3519		 */
3520		flush_work(&sbi->s_discard_work);
3521		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3522	}
3523
3524	if (sbi->s_group_info) {
3525		for (i = 0; i < ngroups; i++) {
3526			cond_resched();
3527			grinfo = ext4_get_group_info(sb, i);
3528			mb_group_bb_bitmap_free(grinfo);
3529			ext4_lock_group(sb, i);
3530			count = ext4_mb_cleanup_pa(grinfo);
3531			if (count)
3532				mb_debug(sb, "mballoc: %d PAs left\n",
3533					 count);
3534			ext4_unlock_group(sb, i);
3535			kmem_cache_free(cachep, grinfo);
3536		}
3537		num_meta_group_infos = (ngroups +
3538				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3539			EXT4_DESC_PER_BLOCK_BITS(sb);
3540		rcu_read_lock();
3541		group_info = rcu_dereference(sbi->s_group_info);
3542		for (i = 0; i < num_meta_group_infos; i++)
3543			kfree(group_info[i]);
3544		kvfree(group_info);
3545		rcu_read_unlock();
3546	}
3547	kfree(sbi->s_mb_avg_fragment_size);
3548	kfree(sbi->s_mb_avg_fragment_size_locks);
3549	kfree(sbi->s_mb_largest_free_orders);
3550	kfree(sbi->s_mb_largest_free_orders_locks);
3551	kfree(sbi->s_mb_offsets);
3552	kfree(sbi->s_mb_maxs);
3553	iput(sbi->s_buddy_cache);
 
3554	if (sbi->s_mb_stats) {
3555		ext4_msg(sb, KERN_INFO,
3556		       "mballoc: %u blocks %u reqs (%u success)",
3557				atomic_read(&sbi->s_bal_allocated),
3558				atomic_read(&sbi->s_bal_reqs),
3559				atomic_read(&sbi->s_bal_success));
3560		ext4_msg(sb, KERN_INFO,
3561		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3562				"%u 2^N hits, %u breaks, %u lost",
3563				atomic_read(&sbi->s_bal_ex_scanned),
3564				atomic_read(&sbi->s_bal_groups_scanned),
3565				atomic_read(&sbi->s_bal_goals),
3566				atomic_read(&sbi->s_bal_2orders),
3567				atomic_read(&sbi->s_bal_breaks),
3568				atomic_read(&sbi->s_mb_lost_chunks));
3569		ext4_msg(sb, KERN_INFO,
3570		       "mballoc: %u generated and it took %llu",
3571				atomic_read(&sbi->s_mb_buddies_generated),
3572				atomic64_read(&sbi->s_mb_generation_time));
3573		ext4_msg(sb, KERN_INFO,
3574		       "mballoc: %u preallocated, %u discarded",
3575				atomic_read(&sbi->s_mb_preallocated),
3576				atomic_read(&sbi->s_mb_discarded));
3577	}
3578
3579	free_percpu(sbi->s_locality_groups);
3580
3581	return 0;
3582}
3583
3584static inline int ext4_issue_discard(struct super_block *sb,
3585		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3586		struct bio **biop)
3587{
3588	ext4_fsblk_t discard_block;
3589
3590	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3591			 ext4_group_first_block_no(sb, block_group));
3592	count = EXT4_C2B(EXT4_SB(sb), count);
3593	trace_ext4_discard_blocks(sb,
3594			(unsigned long long) discard_block, count);
3595	if (biop) {
3596		return __blkdev_issue_discard(sb->s_bdev,
3597			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
3598			(sector_t)count << (sb->s_blocksize_bits - 9),
3599			GFP_NOFS, biop);
3600	} else
3601		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3602}
3603
3604static void ext4_free_data_in_buddy(struct super_block *sb,
3605				    struct ext4_free_data *entry)
3606{
 
3607	struct ext4_buddy e4b;
3608	struct ext4_group_info *db;
3609	int err, count = 0, count2 = 0;
3610
3611	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3612		 entry->efd_count, entry->efd_group, entry);
3613
3614	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3615	/* we expect to find existing buddy because it's pinned */
3616	BUG_ON(err != 0);
3617
3618	spin_lock(&EXT4_SB(sb)->s_md_lock);
3619	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3620	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3621
3622	db = e4b.bd_info;
3623	/* there are blocks to put in buddy to make them really free */
3624	count += entry->efd_count;
3625	count2++;
3626	ext4_lock_group(sb, entry->efd_group);
3627	/* Take it out of per group rb tree */
3628	rb_erase(&entry->efd_node, &(db->bb_free_root));
3629	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3630
3631	/*
3632	 * Clear the trimmed flag for the group so that the next
3633	 * ext4_trim_fs can trim it.
3634	 * If the volume is mounted with -o discard, online discard
3635	 * is supported and the free blocks will be trimmed online.
3636	 */
3637	if (!test_opt(sb, DISCARD))
3638		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3639
3640	if (!db->bb_free_root.rb_node) {
3641		/* No more items in the per group rb tree
3642		 * balance refcounts from ext4_mb_free_metadata()
3643		 */
3644		put_page(e4b.bd_buddy_page);
3645		put_page(e4b.bd_bitmap_page);
3646	}
3647	ext4_unlock_group(sb, entry->efd_group);
3648	ext4_mb_unload_buddy(&e4b);
3649
3650	mb_debug(sb, "freed %d blocks in %d structures\n", count,
3651		 count2);
3652}
3653
3654/*
3655 * This function is called by the jbd2 layer once the commit has finished,
3656 * so we know we can free the blocks that were released with that commit.
3657 */
3658void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3659{
3660	struct ext4_sb_info *sbi = EXT4_SB(sb);
3661	struct ext4_free_data *entry, *tmp;
3662	struct list_head freed_data_list;
3663	struct list_head *cut_pos = NULL;
3664	bool wake;
3665
3666	INIT_LIST_HEAD(&freed_data_list);
3667
3668	spin_lock(&sbi->s_md_lock);
3669	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3670		if (entry->efd_tid != commit_tid)
3671			break;
3672		cut_pos = &entry->efd_list;
3673	}
3674	if (cut_pos)
3675		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3676				  cut_pos);
3677	spin_unlock(&sbi->s_md_lock);
3678
3679	list_for_each_entry(entry, &freed_data_list, efd_list)
3680		ext4_free_data_in_buddy(sb, entry);
3681
3682	if (test_opt(sb, DISCARD)) {
3683		spin_lock(&sbi->s_md_lock);
3684		wake = list_empty(&sbi->s_discard_list);
3685		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3686		spin_unlock(&sbi->s_md_lock);
3687		if (wake)
3688			queue_work(system_unbound_wq, &sbi->s_discard_work);
3689	} else {
3690		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3691			kmem_cache_free(ext4_free_data_cachep, entry);
3692	}
3693}
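
/*
 * Illustrative example (not part of the code above): entries on
 * s_freed_data_list are queued in commit order, so the entries belonging to
 * @commit_tid sit at the head of the list.  With a list of
 * [A(tid 7), B(tid 7), C(tid 8)] and commit_tid == 7, cut_pos ends up at B,
 * list_cut_position() moves A and B onto freed_data_list, and C stays queued
 * for the next commit.
 */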
3694
3695int __init ext4_init_mballoc(void)
3696{
3697	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3698					SLAB_RECLAIM_ACCOUNT);
3699	if (ext4_pspace_cachep == NULL)
3700		goto out;
3701
3702	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3703				    SLAB_RECLAIM_ACCOUNT);
3704	if (ext4_ac_cachep == NULL)
3705		goto out_pa_free;
3706
3707	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3708					   SLAB_RECLAIM_ACCOUNT);
3709	if (ext4_free_data_cachep == NULL)
3710		goto out_ac_free;
3711
3712	return 0;
3713
3714out_ac_free:
3715	kmem_cache_destroy(ext4_ac_cachep);
3716out_pa_free:
3717	kmem_cache_destroy(ext4_pspace_cachep);
3718out:
3719	return -ENOMEM;
3720}
3721
3722void ext4_exit_mballoc(void)
3723{
3724	/*
3725	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3726	 * before destroying the slab cache.
3727	 */
3728	rcu_barrier();
3729	kmem_cache_destroy(ext4_pspace_cachep);
3730	kmem_cache_destroy(ext4_ac_cachep);
3731	kmem_cache_destroy(ext4_free_data_cachep);
3732	ext4_groupinfo_destroy_slabs();
3733}
3734
3735
3736/*
3737 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3738 * Returns 0 if success or error code
3739 */
3740static noinline_for_stack int
3741ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3742				handle_t *handle, unsigned int reserv_clstrs)
3743{
3744	struct buffer_head *bitmap_bh = NULL;
3745	struct ext4_group_desc *gdp;
3746	struct buffer_head *gdp_bh;
3747	struct ext4_sb_info *sbi;
3748	struct super_block *sb;
3749	ext4_fsblk_t block;
3750	int err, len;
3751
3752	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3753	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3754
3755	sb = ac->ac_sb;
3756	sbi = EXT4_SB(sb);
3757
3758	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3759	if (IS_ERR(bitmap_bh)) {
3760		err = PTR_ERR(bitmap_bh);
3761		bitmap_bh = NULL;
3762		goto out_err;
3763	}
3764
3765	BUFFER_TRACE(bitmap_bh, "getting write access");
3766	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3767					    EXT4_JTR_NONE);
3768	if (err)
3769		goto out_err;
3770
3771	err = -EIO;
3772	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3773	if (!gdp)
3774		goto out_err;
3775
3776	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3777			ext4_free_group_clusters(sb, gdp));
3778
3779	BUFFER_TRACE(gdp_bh, "get_write_access");
3780	err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3781	if (err)
3782		goto out_err;
3783
3784	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3785
3786	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3787	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3788		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3789			   "fs metadata", block, block+len);
3790		/* The file system was mounted not to panic on errors.
3791		 * Fix the bitmap and return EFSCORRUPTED.
3792		 * We leak some of the blocks here.
3793		 */
3794		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3795		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3796			      ac->ac_b_ex.fe_len);
3797		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3798		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3799		if (!err)
3800			err = -EFSCORRUPTED;
3801		goto out_err;
3802	}
3803
3804	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3805#ifdef AGGRESSIVE_CHECK
3806	{
3807		int i;
3808		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3809			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3810						bitmap_bh->b_data));
3811		}
3812	}
3813#endif
3814	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3815		      ac->ac_b_ex.fe_len);
3816	if (ext4_has_group_desc_csum(sb) &&
3817	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3818		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3819		ext4_free_group_clusters_set(sb, gdp,
3820					     ext4_free_clusters_after_init(sb,
3821						ac->ac_b_ex.fe_group, gdp));
3822	}
3823	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3824	ext4_free_group_clusters_set(sb, gdp, len);
3825	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3826	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3827
3828	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3829	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3830	/*
3831	 * Now reduce the dirty block count also. Should not go negative
3832	 */
3833	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3834		/* release all the reserved blocks if non delalloc */
3835		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3836				   reserv_clstrs);
3837
3838	if (sbi->s_log_groups_per_flex) {
3839		ext4_group_t flex_group = ext4_flex_group(sbi,
3840							  ac->ac_b_ex.fe_group);
3841		atomic64_sub(ac->ac_b_ex.fe_len,
3842			     &sbi_array_rcu_deref(sbi, s_flex_groups,
3843						  flex_group)->free_clusters);
3844	}
3845
3846	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3847	if (err)
3848		goto out_err;
3849	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3850
3851out_err:
3852	brelse(bitmap_bh);
3853	return err;
3854}
3855
3856/*
3857 * Idempotent helper for Ext4 fast commit replay path to set the state of
3858 * blocks in bitmaps and update counters.
3859 */
3860void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3861			int len, int state)
3862{
3863	struct buffer_head *bitmap_bh = NULL;
3864	struct ext4_group_desc *gdp;
3865	struct buffer_head *gdp_bh;
3866	struct ext4_sb_info *sbi = EXT4_SB(sb);
3867	ext4_group_t group;
3868	ext4_grpblk_t blkoff;
3869	int i, err;
3870	int already;
3871	unsigned int clen, clen_changed, thisgrp_len;
3872
3873	while (len > 0) {
3874		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3875
3876		/*
3877		 * Check to see if we are freeing blocks across a group
3878		 * boundary.
3879		 * With flex_bg, (block, len) may span across more than one
3880		 * group. In that case we need to get each group's metadata
3881		 * in turn to work with, which is why this whole function
3882		 * runs as a loop over the affected groups.
3883		 */
3884		thisgrp_len = min_t(unsigned int, (unsigned int)len,
3885			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3886		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3887
3888		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3889			ext4_error(sb, "Marking blocks in system zone - "
3890				   "Block = %llu, len = %u",
3891				   block, thisgrp_len);
3892			bitmap_bh = NULL;
3893			break;
3894		}
3895
3896		bitmap_bh = ext4_read_block_bitmap(sb, group);
3897		if (IS_ERR(bitmap_bh)) {
3898			err = PTR_ERR(bitmap_bh);
3899			bitmap_bh = NULL;
3900			break;
3901		}
3902
3903		err = -EIO;
3904		gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3905		if (!gdp)
3906			break;
3907
3908		ext4_lock_group(sb, group);
3909		already = 0;
3910		for (i = 0; i < clen; i++)
3911			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3912					 !state)
3913				already++;
3914
3915		clen_changed = clen - already;
3916		if (state)
3917			mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3918		else
3919			mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3920		if (ext4_has_group_desc_csum(sb) &&
3921		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3922			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3923			ext4_free_group_clusters_set(sb, gdp,
3924			     ext4_free_clusters_after_init(sb, group, gdp));
3925		}
3926		if (state)
3927			clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3928		else
3929			clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3930
3931		ext4_free_group_clusters_set(sb, gdp, clen);
3932		ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3933		ext4_group_desc_csum_set(sb, group, gdp);
3934
3935		ext4_unlock_group(sb, group);
3936
3937		if (sbi->s_log_groups_per_flex) {
3938			ext4_group_t flex_group = ext4_flex_group(sbi, group);
3939			struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3940						   s_flex_groups, flex_group);
3941
3942			if (state)
3943				atomic64_sub(clen_changed, &fg->free_clusters);
3944			else
3945				atomic64_add(clen_changed, &fg->free_clusters);
3946
3947		}
3948
3949		err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3950		if (err)
3951			break;
3952		sync_dirty_buffer(bitmap_bh);
3953		err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3954		sync_dirty_buffer(gdp_bh);
3955		if (err)
3956			break;
3957
3958		block += thisgrp_len;
3959		len -= thisgrp_len;
3960		brelse(bitmap_bh);
3961		BUG_ON(len < 0);
3962	}
3963
3964	if (err)
3965		brelse(bitmap_bh);
3966}
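
/*
 * Illustrative example (hypothetical numbers): with 32768 blocks per group
 * and a 1:1 cluster ratio, a request for block = 32760, len = 20 is split by
 * the loop above into thisgrp_len = min(20, 32768 - 32760) = 8 blocks handled
 * in group 0, followed by the remaining 12 blocks handled in group 1 starting
 * at offset 0.
 */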
3967
3968/*
3969 * Here we normalize the request for a locality group.
3970 * Group requests are normalized to s_mb_group_prealloc, which is set
3971 * to the stripe size if one was specified via the mount option.
3972 * s_mb_group_prealloc can be configured via
3973 * /sys/fs/ext4/<partition>/mb_group_prealloc
3974 *
3975 * XXX: should we try to preallocate more than the group has now?
3976 */
3977static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3978{
3979	struct super_block *sb = ac->ac_sb;
3980	struct ext4_locality_group *lg = ac->ac_lg;
3981
3982	BUG_ON(lg == NULL);
3983	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3984	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3985}
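
/*
 * Illustrative note: the goal length above is simply replaced by
 * s_mb_group_prealloc, so even a 4-cluster request from a small file is
 * bumped to the group preallocation size (typically 512 clusters unless
 * tuned via mb_group_prealloc or the stripe mount option).
 */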
3986
3987/*
3988 * Normalization means making request better in terms of
3989 * size and alignment
3990 */
3991static noinline_for_stack void
3992ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3993				struct ext4_allocation_request *ar)
3994{
3995	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3996	int bsbits, max;
3997	ext4_lblk_t end;
3998	loff_t size, start_off;
3999	loff_t orig_size __maybe_unused;
4000	ext4_lblk_t start;
4001	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4002	struct ext4_prealloc_space *pa;
4003
4004	/* only normalize data requests; metadata requests
4005	   do not need preallocation */
4006	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4007		return;
4008
4009	/* sometimes the caller may want exact blocks */
4010	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4011		return;
4012
4013	/* caller may indicate that preallocation isn't
4014	 * required (it's a tail, for example) */
4015	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4016		return;
4017
4018	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4019		ext4_mb_normalize_group_request(ac);
4020		return ;
4021	}
4022
4023	bsbits = ac->ac_sb->s_blocksize_bits;
4024
4025	/* first, let's learn the actual file size
4026	 * assuming the current request is allocated */
4027	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4028	size = size << bsbits;
4029	if (size < i_size_read(ac->ac_inode))
4030		size = i_size_read(ac->ac_inode);
4031	orig_size = size;
4032
4033	/* max size of free chunks */
4034	max = 2 << bsbits;
4035
4036#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
4037		(req <= (size) || max <= (chunk_size))
4038
4039	/* first, try to predict filesize */
4040	/* XXX: should this table be tunable? */
4041	start_off = 0;
4042	if (size <= 16 * 1024) {
4043		size = 16 * 1024;
4044	} else if (size <= 32 * 1024) {
4045		size = 32 * 1024;
4046	} else if (size <= 64 * 1024) {
4047		size = 64 * 1024;
4048	} else if (size <= 128 * 1024) {
4049		size = 128 * 1024;
4050	} else if (size <= 256 * 1024) {
4051		size = 256 * 1024;
4052	} else if (size <= 512 * 1024) {
4053		size = 512 * 1024;
4054	} else if (size <= 1024 * 1024) {
4055		size = 1024 * 1024;
4056	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4057		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4058						(21 - bsbits)) << 21;
4059		size = 2 * 1024 * 1024;
4060	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4061		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4062							(22 - bsbits)) << 22;
4063		size = 4 * 1024 * 1024;
4064	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4065					(8<<20)>>bsbits, max, 8 * 1024)) {
4066		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4067							(23 - bsbits)) << 23;
4068		size = 8 * 1024 * 1024;
4069	} else {
4070		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4071		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4072					      ac->ac_o_ex.fe_len) << bsbits;
4073	}
4074	size = size >> bsbits;
4075	start = start_off >> bsbits;
4076
4077	/*
4078	 * For tiny groups (smaller than 8MB) the chosen allocation
4079	 * alignment may be larger than group size. Make sure the
4080	 * alignment does not move allocation to a different group which
4081	 * makes mballoc fail assertions later.
4082	 */
4083	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4084			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4085
4086	/* don't cover already allocated blocks in selected range */
4087	if (ar->pleft && start <= ar->lleft) {
4088		size -= ar->lleft + 1 - start;
4089		start = ar->lleft + 1;
4090	}
4091	if (ar->pright && start + size - 1 >= ar->lright)
4092		size -= start + size - ar->lright;
4093
4094	/*
4095	 * Trim allocation request for filesystems with artificially small
4096	 * groups.
4097	 */
4098	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4099		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4100
4101	end = start + size;
4102
4103	/* check we don't cross already preallocated blocks */
4104	rcu_read_lock();
4105	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4106		ext4_lblk_t pa_end;
4107
4108		if (pa->pa_deleted)
4109			continue;
4110		spin_lock(&pa->pa_lock);
4111		if (pa->pa_deleted) {
4112			spin_unlock(&pa->pa_lock);
4113			continue;
4114		}
4115
4116		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4117						  pa->pa_len);
4118
4119		/* PA must not overlap original request */
4120		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4121			ac->ac_o_ex.fe_logical < pa->pa_lstart));
4122
4123		/* skip PAs this normalized request doesn't overlap with */
4124		if (pa->pa_lstart >= end || pa_end <= start) {
4125			spin_unlock(&pa->pa_lock);
4126			continue;
4127		}
4128		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4129
4130		/* adjust start or end to be adjacent to this pa */
4131		if (pa_end <= ac->ac_o_ex.fe_logical) {
4132			BUG_ON(pa_end < start);
4133			start = pa_end;
4134		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4135			BUG_ON(pa->pa_lstart > end);
4136			end = pa->pa_lstart;
4137		}
4138		spin_unlock(&pa->pa_lock);
4139	}
4140	rcu_read_unlock();
4141	size = end - start;
4142
4143	/* XXX: extra loop to check we really don't overlap preallocations */
4144	rcu_read_lock();
4145	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4146		ext4_lblk_t pa_end;
4147
4148		spin_lock(&pa->pa_lock);
4149		if (pa->pa_deleted == 0) {
4150			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4151							  pa->pa_len);
4152			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4153		}
4154		spin_unlock(&pa->pa_lock);
4155	}
4156	rcu_read_unlock();
4157
4158	/*
4159	 * In this function "start" and "size" are normalized for better
4160	 * alignment and length such that we could preallocate more blocks.
4161	 * This normalization is done such that original request of
4162	 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4163	 * "size" boundaries.
4164	 * (Note fe_len can be relaxed since the FS block allocation API does not
4165	 * provide a guarantee on the number of contiguous blocks allocated, since
4166	 * that depends upon the free space left, etc).
4167	 * In case of inode pa, later we use the allocated blocks
4168	 * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4169	 * range of goal/best blocks [start, size] to put it at the
4170	 * ac_o_ex.fe_logical extent of this inode.
4171	 * (See ext4_mb_use_inode_pa() for more details)
4172	 */
4173	if (start + size <= ac->ac_o_ex.fe_logical ||
4174			start > ac->ac_o_ex.fe_logical) {
4175		ext4_msg(ac->ac_sb, KERN_ERR,
4176			 "start %lu, size %lu, fe_logical %lu",
4177			 (unsigned long) start, (unsigned long) size,
4178			 (unsigned long) ac->ac_o_ex.fe_logical);
4179		BUG();
4180	}
4181	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4182
4183	/* now prepare goal request */
4184
4185	/* XXX: is it better to align blocks with respect to logical
4186	 * placement or to satisfy a big request as is */
4187	ac->ac_g_ex.fe_logical = start;
4188	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4189
4190	/* define goal start in order to merge */
4191	if (ar->pright && (ar->lright == (start + size))) {
4192		/* merge to the right */
4193		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4194						&ac->ac_f_ex.fe_group,
4195						&ac->ac_f_ex.fe_start);
4196		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4197	}
4198	if (ar->pleft && (ar->lleft + 1 == start)) {
4199		/* merge to the left */
4200		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4201						&ac->ac_f_ex.fe_group,
4202						&ac->ac_f_ex.fe_start);
4203		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4204	}
4205
4206	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4207		 orig_size, start);
4208}
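
/*
 * Illustrative walk-through of the size table above (hypothetical numbers,
 * 4 KiB blocks, 1:1 cluster ratio): a write that would leave the file at
 * ~300 KiB falls into the "size <= 512 * 1024" bucket, so the goal becomes a
 * 512 KiB extent (128 blocks) starting at logical 0.  A file growing to
 * ~3 MiB instead takes the first NRL_CHECK_SIZE() branch and gets a 2 MiB
 * goal whose start_off is aligned down to a 2 MiB boundary of the requested
 * logical block.
 */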
4209
4210static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4211{
4212	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4213
4214	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4215		atomic_inc(&sbi->s_bal_reqs);
4216		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4217		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4218			atomic_inc(&sbi->s_bal_success);
4219		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4220		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4221		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4222				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4223			atomic_inc(&sbi->s_bal_goals);
4224		if (ac->ac_found > sbi->s_mb_max_to_scan)
4225			atomic_inc(&sbi->s_bal_breaks);
4226	}
4227
4228	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4229		trace_ext4_mballoc_alloc(ac);
4230	else
4231		trace_ext4_mballoc_prealloc(ac);
4232}
4233
4234/*
4235 * Called on failure; free up any blocks from the inode PA for this
4236 * context.  We don't need this for MB_GROUP_PA because we only change
4237 * pa_free in ext4_mb_release_context(), but on failure, we've already
4238 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4239 */
4240static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4241{
4242	struct ext4_prealloc_space *pa = ac->ac_pa;
4243	struct ext4_buddy e4b;
4244	int err;
4245
4246	if (pa == NULL) {
4247		if (ac->ac_f_ex.fe_len == 0)
4248			return;
4249		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4250		if (err) {
4251			/*
4252			 * This should never happen since we pin the
4253			 * pages in the ext4_allocation_context so
4254			 * ext4_mb_load_buddy() should never fail.
4255			 */
4256			WARN(1, "mb_load_buddy failed (%d)", err);
4257			return;
4258		}
4259		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4260		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4261			       ac->ac_f_ex.fe_len);
4262		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4263		ext4_mb_unload_buddy(&e4b);
4264		return;
4265	}
4266	if (pa->pa_type == MB_INODE_PA)
4267		pa->pa_free += ac->ac_b_ex.fe_len;
4268}
4269
4270/*
4271 * use blocks preallocated to inode
4272 */
4273static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4274				struct ext4_prealloc_space *pa)
4275{
4276	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4277	ext4_fsblk_t start;
4278	ext4_fsblk_t end;
4279	int len;
4280
4281	/* found preallocated blocks, use them */
4282	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4283	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4284		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4285	len = EXT4_NUM_B2C(sbi, end - start);
4286	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4287					&ac->ac_b_ex.fe_start);
4288	ac->ac_b_ex.fe_len = len;
4289	ac->ac_status = AC_STATUS_FOUND;
4290	ac->ac_pa = pa;
4291
4292	BUG_ON(start < pa->pa_pstart);
4293	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4294	BUG_ON(pa->pa_free < len);
4295	pa->pa_free -= len;
4296
4297	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4298}
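
/*
 * Illustrative example (hypothetical numbers, 1:1 cluster ratio): for a pa
 * with pa_lstart = 100, pa_pstart = 5000, pa_len = 16 and a request with
 * fe_logical = 104, fe_len = 8, the code above computes
 *
 *	start = 5000 + (104 - 100) = 5004
 *	end   = min(5000 + 16, 5004 + 8) = 5012
 *	len   = 5012 - 5004 = 8
 *
 * i.e. the request is satisfied from the matching offset inside the pa.
 */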
4299
4300/*
4301 * use blocks preallocated to locality group
4302 */
4303static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4304				struct ext4_prealloc_space *pa)
4305{
4306	unsigned int len = ac->ac_o_ex.fe_len;
4307
4308	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4309					&ac->ac_b_ex.fe_group,
4310					&ac->ac_b_ex.fe_start);
4311	ac->ac_b_ex.fe_len = len;
4312	ac->ac_status = AC_STATUS_FOUND;
4313	ac->ac_pa = pa;
4314
4315	/* we don't correct pa_pstart or pa_len here to avoid a
4316	 * possible race when the group is being loaded concurrently;
4317	 * instead we correct the pa later, after the blocks are marked
4318	 * in the on-disk bitmap -- see ext4_mb_release_context().
4319	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
4320	 */
4321	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4322		 pa->pa_lstart-len, len, pa);
4323}
4324
4325/*
4326 * Return the prealloc space that has the minimal distance
4327 * from the goal block. @cpa is the prealloc
4328 * space with the currently known minimal distance
4329 * from the goal block.
4330 */
4331static struct ext4_prealloc_space *
4332ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4333			struct ext4_prealloc_space *pa,
4334			struct ext4_prealloc_space *cpa)
4335{
4336	ext4_fsblk_t cur_distance, new_distance;
4337
4338	if (cpa == NULL) {
4339		atomic_inc(&pa->pa_count);
4340		return pa;
4341	}
4342	cur_distance = abs(goal_block - cpa->pa_pstart);
4343	new_distance = abs(goal_block - pa->pa_pstart);
4344
4345	if (cur_distance <= new_distance)
4346		return cpa;
4347
4348	/* drop the previous reference */
4349	atomic_dec(&cpa->pa_count);
4350	atomic_inc(&pa->pa_count);
4351	return pa;
4352}
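
/*
 * Illustrative example: with goal_block = 1000, a current best @cpa at
 * pa_pstart = 900 (distance 100) and a candidate @pa at pa_pstart = 1040
 * (distance 40), the candidate wins: the reference on @cpa is dropped, a
 * reference on @pa is taken, and @pa is returned as the new best.
 */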
4353
4354/*
4355 * search goal blocks in preallocated space
4356 */
4357static noinline_for_stack bool
4358ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4359{
4360	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4361	int order, i;
4362	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4363	struct ext4_locality_group *lg;
4364	struct ext4_prealloc_space *pa, *cpa = NULL;
4365	ext4_fsblk_t goal_block;
4366
4367	/* only data can be preallocated */
4368	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4369		return false;
4370
4371	/* first, try per-file preallocation */
4372	rcu_read_lock();
4373	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4374
4375		/* none of the fields in this condition change,
4376		 * so we can skip locking for them */
4377		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4378		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4379					       EXT4_C2B(sbi, pa->pa_len)))
4380			continue;
4381
4382		/* non-extent files can't have physical blocks past 2^32 */
4383		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4384		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4385		     EXT4_MAX_BLOCK_FILE_PHYS))
4386			continue;
4387
4388		/* found preallocated blocks, use them */
4389		spin_lock(&pa->pa_lock);
4390		if (pa->pa_deleted == 0 && pa->pa_free) {
4391			atomic_inc(&pa->pa_count);
4392			ext4_mb_use_inode_pa(ac, pa);
4393			spin_unlock(&pa->pa_lock);
4394			ac->ac_criteria = 10;
4395			rcu_read_unlock();
4396			return true;
4397		}
4398		spin_unlock(&pa->pa_lock);
4399	}
4400	rcu_read_unlock();
4401
4402	/* can we use group allocation? */
4403	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4404		return false;
4405
4406	/* inode may have no locality group for some reason */
4407	lg = ac->ac_lg;
4408	if (lg == NULL)
4409		return false;
4410	order  = fls(ac->ac_o_ex.fe_len) - 1;
4411	if (order > PREALLOC_TB_SIZE - 1)
4412		/* The max size of hash table is PREALLOC_TB_SIZE */
4413		order = PREALLOC_TB_SIZE - 1;
4414
4415	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4416	/*
4417	 * search for the prealloc space that is having
4418	 * minimal distance from the goal block.
4419	 */
4420	for (i = order; i < PREALLOC_TB_SIZE; i++) {
4421		rcu_read_lock();
4422		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4423					pa_inode_list) {
4424			spin_lock(&pa->pa_lock);
4425			if (pa->pa_deleted == 0 &&
4426					pa->pa_free >= ac->ac_o_ex.fe_len) {
4427
4428				cpa = ext4_mb_check_group_pa(goal_block,
4429								pa, cpa);
4430			}
4431			spin_unlock(&pa->pa_lock);
4432		}
4433		rcu_read_unlock();
4434	}
4435	if (cpa) {
4436		ext4_mb_use_group_pa(ac, cpa);
4437		ac->ac_criteria = 20;
4438		return true;
4439	}
4440	return false;
4441}
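
/*
 * Illustrative example of the bucket selection above: for a request of
 * fe_len = 12 clusters, fls(12) - 1 = 3, so the search starts at
 * lg_prealloc_list[3] and walks the higher-order buckets, keeping the pa
 * with pa_free >= 12 that lies closest to the goal block.
 */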
4442
4443/*
4444 * The function goes through all blocks freed in the group
4445 * but not yet committed and marks them used in the in-core bitmap.
4446 * The buddy must be generated from this bitmap.
4447 * Needs to be called with the ext4 group lock held.
4448 */
4449static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4450						ext4_group_t group)
4451{
4452	struct rb_node *n;
4453	struct ext4_group_info *grp;
4454	struct ext4_free_data *entry;
4455
4456	grp = ext4_get_group_info(sb, group);
4457	n = rb_first(&(grp->bb_free_root));
4458
4459	while (n) {
4460		entry = rb_entry(n, struct ext4_free_data, efd_node);
4461		mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4462		n = rb_next(n);
4463	}
4464	return;
4465}
4466
4467/*
4468 * The function goes through all preallocations in this group and marks them
4469 * used in the in-core bitmap. The buddy must be generated from this bitmap.
4470 * Needs to be called with the ext4 group lock held.
4471 */
4472static noinline_for_stack
4473void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4474					ext4_group_t group)
4475{
4476	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4477	struct ext4_prealloc_space *pa;
4478	struct list_head *cur;
4479	ext4_group_t groupnr;
4480	ext4_grpblk_t start;
4481	int preallocated = 0;
4482	int len;
4483
4484	/* all forms of preallocation discard first load the group,
4485	 * so the only competing code is preallocation use.
4486	 * we don't need any locking here.
4487	 * notice we do NOT ignore preallocations with pa_deleted set;
4488	 * otherwise we could leave used blocks available for
4489	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
4490	 * is dropping the preallocation
4491	 */
4492	list_for_each(cur, &grp->bb_prealloc_list) {
4493		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4494		spin_lock(&pa->pa_lock);
4495		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4496					     &groupnr, &start);
4497		len = pa->pa_len;
4498		spin_unlock(&pa->pa_lock);
4499		if (unlikely(len == 0))
4500			continue;
4501		BUG_ON(groupnr != group);
4502		mb_set_bits(bitmap, start, len);
4503		preallocated += len;
4504	}
4505	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4506}
4507
4508static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4509				    struct ext4_prealloc_space *pa)
4510{
4511	struct ext4_inode_info *ei;
4512
4513	if (pa->pa_deleted) {
4514		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4515			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4516			     pa->pa_len);
4517		return;
4518	}
4519
4520	pa->pa_deleted = 1;
4521
4522	if (pa->pa_type == MB_INODE_PA) {
4523		ei = EXT4_I(pa->pa_inode);
4524		atomic_dec(&ei->i_prealloc_active);
4525	}
4526}
4527
4528static void ext4_mb_pa_callback(struct rcu_head *head)
4529{
4530	struct ext4_prealloc_space *pa;
4531	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4532
4533	BUG_ON(atomic_read(&pa->pa_count));
4534	BUG_ON(pa->pa_deleted == 0);
4535	kmem_cache_free(ext4_pspace_cachep, pa);
4536}
4537
4538/*
4539 * drops a reference to preallocated space descriptor
4540 * if this was the last reference and the space is consumed
4541 */
4542static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4543			struct super_block *sb, struct ext4_prealloc_space *pa)
4544{
4545	ext4_group_t grp;
4546	ext4_fsblk_t grp_blk;
4547
4548	/* in this short window concurrent discard can set pa_deleted */
4549	spin_lock(&pa->pa_lock);
4550	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4551		spin_unlock(&pa->pa_lock);
4552		return;
4553	}
4554
4555	if (pa->pa_deleted == 1) {
4556		spin_unlock(&pa->pa_lock);
4557		return;
4558	}
4559
4560	ext4_mb_mark_pa_deleted(sb, pa);
4561	spin_unlock(&pa->pa_lock);
4562
4563	grp_blk = pa->pa_pstart;
4564	/*
4565	 * If doing group-based preallocation, pa_pstart may be in the
4566	 * next group when pa is used up
4567	 */
4568	if (pa->pa_type == MB_GROUP_PA)
4569		grp_blk--;
4570
4571	grp = ext4_get_group_number(sb, grp_blk);
4572
4573	/*
4574	 * possible race:
4575	 *
4576	 *  P1 (buddy init)			P2 (regular allocation)
4577	 *					find block B in PA
4578	 *  copy on-disk bitmap to buddy
4579	 *  					mark B in on-disk bitmap
4580	 *					drop PA from group
4581	 *  mark all PAs in buddy
4582	 *
4583	 * thus, P1 initializes buddy with B available. to prevent this
4584	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4585	 * against that pair
4586	 */
4587	ext4_lock_group(sb, grp);
4588	list_del(&pa->pa_group_list);
4589	ext4_unlock_group(sb, grp);
4590
4591	spin_lock(pa->pa_obj_lock);
4592	list_del_rcu(&pa->pa_inode_list);
4593	spin_unlock(pa->pa_obj_lock);
4594
4595	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4596}
4597
4598/*
4599 * creates new preallocated space for the given inode
4600 */
4601static noinline_for_stack void
4602ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4603{
4604	struct super_block *sb = ac->ac_sb;
4605	struct ext4_sb_info *sbi = EXT4_SB(sb);
4606	struct ext4_prealloc_space *pa;
4607	struct ext4_group_info *grp;
4608	struct ext4_inode_info *ei;
4609
4610	/* preallocate only when the found space is larger than requested */
4611	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4612	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4613	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4614	BUG_ON(ac->ac_pa == NULL);
4615
4616	pa = ac->ac_pa;
4617
4618	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4619		int winl;
4620		int wins;
4621		int win;
4622		int offs;
4623
4624		/* we can't allocate as much as the normalizer wants,
4625		 * so the found space must get a proper lstart
4626		 * to cover the original request */
4627		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4628		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4629
4630		/* we're limited by the original request in that
4631		 * the logical block must be covered anyway;
4632		 * winl is the window within which we can move our chunk */
4633		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4634
4635		/* also, we should cover whole original request */
4636		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4637
4638		/* the smallest one defines real window */
4639		win = min(winl, wins);
4640
4641		offs = ac->ac_o_ex.fe_logical %
4642			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4643		if (offs && offs < win)
4644			win = offs;
4645
4646		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4647			EXT4_NUM_B2C(sbi, win);
4648		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4649		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4650	}
4651
4652	/* preallocation can change ac_b_ex, thus we store actually
4653	 * allocated blocks for history */
4654	ac->ac_f_ex = ac->ac_b_ex;
4655
4656	pa->pa_lstart = ac->ac_b_ex.fe_logical;
4657	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4658	pa->pa_len = ac->ac_b_ex.fe_len;
4659	pa->pa_free = pa->pa_len;
4660	spin_lock_init(&pa->pa_lock);
4661	INIT_LIST_HEAD(&pa->pa_inode_list);
4662	INIT_LIST_HEAD(&pa->pa_group_list);
4663	pa->pa_deleted = 0;
4664	pa->pa_type = MB_INODE_PA;
4665
4666	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4667		 pa->pa_len, pa->pa_lstart);
4668	trace_ext4_mb_new_inode_pa(ac, pa);
4669
4670	ext4_mb_use_inode_pa(ac, pa);
4671	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4672
4673	ei = EXT4_I(ac->ac_inode);
4674	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4675
4676	pa->pa_obj_lock = &ei->i_prealloc_lock;
4677	pa->pa_inode = ac->ac_inode;
4678
4679	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4680
4681	spin_lock(pa->pa_obj_lock);
4682	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4683	spin_unlock(pa->pa_obj_lock);
4684	atomic_inc(&ei->i_prealloc_active);
4685}
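
/*
 * Illustrative example of the window logic above (hypothetical numbers,
 * 1:1 cluster ratio): with a goal of 32 blocks at logical 96, an original
 * request of 4 blocks at logical 100, and a best extent of only 16 blocks,
 *
 *	winl = 100 - 96 = 4,  wins = 16 - 4 = 12,  win = 4,
 *	offs = 100 % 16 = 4   (not < win, so win stays 4)
 *
 * so pa_lstart becomes 100 - 4 = 96 and the pa covers logical [96, 112),
 * which still contains the original [100, 104) request.
 */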
4686
4687/*
4688 * creates new preallocated space for the locality group this inode belongs to
4689 */
4690static noinline_for_stack void
4691ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4692{
4693	struct super_block *sb = ac->ac_sb;
4694	struct ext4_locality_group *lg;
4695	struct ext4_prealloc_space *pa;
4696	struct ext4_group_info *grp;
4697
4698	/* preallocate only when the found space is larger than requested */
4699	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4700	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4701	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4702	BUG_ON(ac->ac_pa == NULL);
4703
4704	pa = ac->ac_pa;
4705
4706	/* preallocation can change ac_b_ex, thus we store actually
4707	 * allocated blocks for history */
4708	ac->ac_f_ex = ac->ac_b_ex;
4709
4710	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4711	pa->pa_lstart = pa->pa_pstart;
4712	pa->pa_len = ac->ac_b_ex.fe_len;
4713	pa->pa_free = pa->pa_len;
4714	spin_lock_init(&pa->pa_lock);
4715	INIT_LIST_HEAD(&pa->pa_inode_list);
4716	INIT_LIST_HEAD(&pa->pa_group_list);
4717	pa->pa_deleted = 0;
4718	pa->pa_type = MB_GROUP_PA;
4719
4720	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4721		 pa->pa_len, pa->pa_lstart);
4722	trace_ext4_mb_new_group_pa(ac, pa);
4723
4724	ext4_mb_use_group_pa(ac, pa);
4725	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4726
4727	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4728	lg = ac->ac_lg;
4729	BUG_ON(lg == NULL);
4730
4731	pa->pa_obj_lock = &lg->lg_prealloc_lock;
4732	pa->pa_inode = NULL;
4733
4734	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4735
4736	/*
4737	 * We will later add the new pa to the right bucket
4738	 * after updating the pa_free in ext4_mb_release_context
4739	 */
4740}
4741
4742static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4743{
4744	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4745		ext4_mb_new_group_pa(ac);
4746	else
4747		ext4_mb_new_inode_pa(ac);
4748}
4749
4750/*
4751 * finds all unused blocks in on-disk bitmap, frees them in
4752 * in-core bitmap and buddy.
4753 * @pa must be unlinked from inode and group lists, so that
4754 * nobody else can find/use it.
4755 * the caller MUST hold group/inode locks.
4756 * TODO: optimize the case when there are no in-core structures yet
4757 */
4758static noinline_for_stack int
4759ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4760			struct ext4_prealloc_space *pa)
4761{
4762	struct super_block *sb = e4b->bd_sb;
4763	struct ext4_sb_info *sbi = EXT4_SB(sb);
4764	unsigned int end;
4765	unsigned int next;
4766	ext4_group_t group;
4767	ext4_grpblk_t bit;
4768	unsigned long long grp_blk_start;
4769	int free = 0;
4770
4771	BUG_ON(pa->pa_deleted == 0);
4772	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4773	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4774	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4775	end = bit + pa->pa_len;
4776
4777	while (bit < end) {
4778		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4779		if (bit >= end)
4780			break;
4781		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4782		mb_debug(sb, "free preallocated %u/%u in group %u\n",
4783			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4784			 (unsigned) next - bit, (unsigned) group);
4785		free += next - bit;
4786
4787		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4788		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4789						    EXT4_C2B(sbi, bit)),
4790					       next - bit);
4791		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4792		bit = next + 1;
4793	}
4794	if (free != pa->pa_free) {
4795		ext4_msg(e4b->bd_sb, KERN_CRIT,
4796			 "pa %p: logic %lu, phys. %lu, len %d",
4797			 pa, (unsigned long) pa->pa_lstart,
4798			 (unsigned long) pa->pa_pstart,
4799			 pa->pa_len);
4800		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4801					free, pa->pa_free);
4802		/*
4803		 * pa is already deleted so we use the value obtained
4804		 * from the bitmap and continue.
4805		 */
4806	}
4807	atomic_add(free, &sbi->s_mb_discarded);
4808
4809	return 0;
4810}
4811
4812static noinline_for_stack int
4813ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4814				struct ext4_prealloc_space *pa)
4815{
4816	struct super_block *sb = e4b->bd_sb;
4817	ext4_group_t group;
4818	ext4_grpblk_t bit;
4819
4820	trace_ext4_mb_release_group_pa(sb, pa);
4821	BUG_ON(pa->pa_deleted == 0);
4822	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4823	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4824	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4825	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4826	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4827
4828	return 0;
4829}
4830
4831/*
4832 * releases all preallocations in given group
4833 *
4834 * first, we need to decide discard policy:
4835 * - when do we discard
4836 *   1) ENOSPC
4837 * - how many do we discard
4838 *   1) how many requested
4839 */
4840static noinline_for_stack int
4841ext4_mb_discard_group_preallocations(struct super_block *sb,
4842				     ext4_group_t group, int *busy)
4843{
4844	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4845	struct buffer_head *bitmap_bh = NULL;
4846	struct ext4_prealloc_space *pa, *tmp;
4847	struct list_head list;
4848	struct ext4_buddy e4b;
4849	int err;
4850	int free = 0;
4851
4852	mb_debug(sb, "discard preallocation for group %u\n", group);
4853	if (list_empty(&grp->bb_prealloc_list))
4854		goto out_dbg;
4855
4856	bitmap_bh = ext4_read_block_bitmap(sb, group);
4857	if (IS_ERR(bitmap_bh)) {
4858		err = PTR_ERR(bitmap_bh);
4859		ext4_error_err(sb, -err,
4860			       "Error %d reading block bitmap for %u",
4861			       err, group);
4862		goto out_dbg;
4863	}
4864
4865	err = ext4_mb_load_buddy(sb, group, &e4b);
4866	if (err) {
4867		ext4_warning(sb, "Error %d loading buddy information for %u",
4868			     err, group);
4869		put_bh(bitmap_bh);
4870		goto out_dbg;
4871	}
4872
4873	INIT_LIST_HEAD(&list);
4874	ext4_lock_group(sb, group);
4875	list_for_each_entry_safe(pa, tmp,
4876				&grp->bb_prealloc_list, pa_group_list) {
4877		spin_lock(&pa->pa_lock);
4878		if (atomic_read(&pa->pa_count)) {
4879			spin_unlock(&pa->pa_lock);
4880			*busy = 1;
4881			continue;
4882		}
4883		if (pa->pa_deleted) {
4884			spin_unlock(&pa->pa_lock);
4885			continue;
4886		}
4887
4888		/* seems this one can be freed ... */
4889		ext4_mb_mark_pa_deleted(sb, pa);
4890
4891		if (!free)
4892			this_cpu_inc(discard_pa_seq);
4893
4894		/* we can trust pa_free ... */
4895		free += pa->pa_free;
4896
4897		spin_unlock(&pa->pa_lock);
4898
4899		list_del(&pa->pa_group_list);
4900		list_add(&pa->u.pa_tmp_list, &list);
4901	}
4902
4903	/* now free all selected PAs */
4904	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4905
4906		/* remove from object (inode or locality group) */
4907		spin_lock(pa->pa_obj_lock);
4908		list_del_rcu(&pa->pa_inode_list);
4909		spin_unlock(pa->pa_obj_lock);
4910
4911		if (pa->pa_type == MB_GROUP_PA)
4912			ext4_mb_release_group_pa(&e4b, pa);
4913		else
4914			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4915
4916		list_del(&pa->u.pa_tmp_list);
4917		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4918	}
4919
4920	ext4_unlock_group(sb, group);
4921	ext4_mb_unload_buddy(&e4b);
4922	put_bh(bitmap_bh);
4923out_dbg:
4924	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4925		 free, group, grp->bb_free);
4926	return free;
4927}
4928
4929/*
4930 * releases all unused preallocated blocks for the given inode
4931 *
4932 * It's important to discard preallocations under i_data_sem.
4933 * We don't want another block to be served from the prealloc
4934 * space while we are discarding the inode prealloc space.
4935 *
4936 * FIXME!! Make sure it is valid at all the call sites
4937 */
4938void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4939{
4940	struct ext4_inode_info *ei = EXT4_I(inode);
4941	struct super_block *sb = inode->i_sb;
4942	struct buffer_head *bitmap_bh = NULL;
4943	struct ext4_prealloc_space *pa, *tmp;
4944	ext4_group_t group = 0;
4945	struct list_head list;
4946	struct ext4_buddy e4b;
4947	int err;
4948
4949	if (!S_ISREG(inode->i_mode)) {
4950		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4951		return;
4952	}
4953
4954	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4955		return;
4956
4957	mb_debug(sb, "discard preallocation for inode %lu\n",
4958		 inode->i_ino);
4959	trace_ext4_discard_preallocations(inode,
4960			atomic_read(&ei->i_prealloc_active), needed);
4961
4962	INIT_LIST_HEAD(&list);
4963
4964	if (needed == 0)
4965		needed = UINT_MAX;
4966
4967repeat:
4968	/* first, collect all pa's in the inode */
4969	spin_lock(&ei->i_prealloc_lock);
4970	while (!list_empty(&ei->i_prealloc_list) && needed) {
4971		pa = list_entry(ei->i_prealloc_list.prev,
4972				struct ext4_prealloc_space, pa_inode_list);
4973		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4974		spin_lock(&pa->pa_lock);
4975		if (atomic_read(&pa->pa_count)) {
4976			/* this shouldn't happen often - nobody should
4977			 * use preallocation while we're discarding it */
4978			spin_unlock(&pa->pa_lock);
4979			spin_unlock(&ei->i_prealloc_lock);
4980			ext4_msg(sb, KERN_ERR,
4981				 "uh-oh! used pa while discarding");
4982			WARN_ON(1);
4983			schedule_timeout_uninterruptible(HZ);
4984			goto repeat;
4985
4986		}
4987		if (pa->pa_deleted == 0) {
4988			ext4_mb_mark_pa_deleted(sb, pa);
4989			spin_unlock(&pa->pa_lock);
4990			list_del_rcu(&pa->pa_inode_list);
4991			list_add(&pa->u.pa_tmp_list, &list);
4992			needed--;
4993			continue;
4994		}
4995
4996		/* someone is deleting pa right now */
4997		spin_unlock(&pa->pa_lock);
4998		spin_unlock(&ei->i_prealloc_lock);
4999
5000		/* we have to wait here because pa_deleted
5001		 * doesn't mean the pa is already unlinked from
5002		 * the list. since we might be called from
5003		 * ->clear_inode(), the inode would get freed
5004		 * and a concurrent thread which is unlinking
5005		 * the pa from the inode's list could access already
5006		 * freed memory, bad-bad-bad */
5007
5008		/* XXX: if this happens too often, we can
5009		 * add a flag to force wait only in case
5010		 * of ->clear_inode(), but not in case of
5011		 * regular truncate */
5012		schedule_timeout_uninterruptible(HZ);
5013		goto repeat;
5014	}
5015	spin_unlock(&ei->i_prealloc_lock);
5016
5017	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5018		BUG_ON(pa->pa_type != MB_INODE_PA);
5019		group = ext4_get_group_number(sb, pa->pa_pstart);
5020
5021		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5022					     GFP_NOFS|__GFP_NOFAIL);
5023		if (err) {
5024			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5025				       err, group);
5026			continue;
5027		}
5028
5029		bitmap_bh = ext4_read_block_bitmap(sb, group);
5030		if (IS_ERR(bitmap_bh)) {
5031			err = PTR_ERR(bitmap_bh);
5032			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5033				       err, group);
5034			ext4_mb_unload_buddy(&e4b);
5035			continue;
5036		}
5037
5038		ext4_lock_group(sb, group);
5039		list_del(&pa->pa_group_list);
5040		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5041		ext4_unlock_group(sb, group);
5042
5043		ext4_mb_unload_buddy(&e4b);
5044		put_bh(bitmap_bh);
5045
5046		list_del(&pa->u.pa_tmp_list);
5047		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5048	}
5049}
5050
5051static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5052{
5053	struct ext4_prealloc_space *pa;
5054
5055	BUG_ON(ext4_pspace_cachep == NULL);
5056	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5057	if (!pa)
5058		return -ENOMEM;
5059	atomic_set(&pa->pa_count, 1);
5060	ac->ac_pa = pa;
5061	return 0;
5062}
5063
5064static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5065{
5066	struct ext4_prealloc_space *pa = ac->ac_pa;
5067
5068	BUG_ON(!pa);
5069	ac->ac_pa = NULL;
5070	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5071	kmem_cache_free(ext4_pspace_cachep, pa);
5072}
5073
5074#ifdef CONFIG_EXT4_DEBUG
5075static inline void ext4_mb_show_pa(struct super_block *sb)
5076{
5077	ext4_group_t i, ngroups;
5078
5079	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5080		return;
5081
5082	ngroups = ext4_get_groups_count(sb);
5083	mb_debug(sb, "groups: ");
5084	for (i = 0; i < ngroups; i++) {
5085		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5086		struct ext4_prealloc_space *pa;
5087		ext4_grpblk_t start;
5088		struct list_head *cur;
5089		ext4_lock_group(sb, i);
5090		list_for_each(cur, &grp->bb_prealloc_list) {
5091			pa = list_entry(cur, struct ext4_prealloc_space,
5092					pa_group_list);
5093			spin_lock(&pa->pa_lock);
5094			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5095						     NULL, &start);
5096			spin_unlock(&pa->pa_lock);
5097			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5098				 pa->pa_len);
5099		}
5100		ext4_unlock_group(sb, i);
5101		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5102			 grp->bb_fragments);
5103	}
5104}
5105
5106static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5107{
5108	struct super_block *sb = ac->ac_sb;
5109
5110	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5111		return;
5112
5113	mb_debug(sb, "Can't allocate:"
5114			" Allocation context details:");
5115	mb_debug(sb, "status %u flags 0x%x",
5116			ac->ac_status, ac->ac_flags);
5117	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5118			"goal %lu/%lu/%lu@%lu, "
5119			"best %lu/%lu/%lu@%lu cr %d",
5120			(unsigned long)ac->ac_o_ex.fe_group,
5121			(unsigned long)ac->ac_o_ex.fe_start,
5122			(unsigned long)ac->ac_o_ex.fe_len,
5123			(unsigned long)ac->ac_o_ex.fe_logical,
5124			(unsigned long)ac->ac_g_ex.fe_group,
5125			(unsigned long)ac->ac_g_ex.fe_start,
5126			(unsigned long)ac->ac_g_ex.fe_len,
5127			(unsigned long)ac->ac_g_ex.fe_logical,
5128			(unsigned long)ac->ac_b_ex.fe_group,
5129			(unsigned long)ac->ac_b_ex.fe_start,
5130			(unsigned long)ac->ac_b_ex.fe_len,
5131			(unsigned long)ac->ac_b_ex.fe_logical,
5132			(int)ac->ac_criteria);
5133	mb_debug(sb, "%u found", ac->ac_found);
5134	ext4_mb_show_pa(sb);
5135}
5136#else
5137static inline void ext4_mb_show_pa(struct super_block *sb)
5138{
5139	return;
5140}
5141static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5142{
5143	ext4_mb_show_pa(ac->ac_sb);
5144	return;
5145}
5146#endif
5147
5148/*
5149 * We use locality group preallocation for small files. The size of the
5150 * file is the current size or the resulting size after allocation,
5151 * whichever is larger.
5152 *
5153 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5154 */
5155static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5156{
5157	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5158	int bsbits = ac->ac_sb->s_blocksize_bits;
5159	loff_t size, isize;
5160	bool inode_pa_eligible, group_pa_eligible;
5161
5162	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5163		return;
5164
5165	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5166		return;
5167
5168	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5169	inode_pa_eligible = true;
5170	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5171	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5172		>> bsbits;
5173
5174	/* No point in using inode preallocation for closed files */
5175	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5176	    !inode_is_open_for_write(ac->ac_inode))
5177		inode_pa_eligible = false;
5178
5179	size = max(size, isize);
5180	/* Don't use group allocation for large files */
5181	if (size > sbi->s_mb_stream_request)
5182		group_pa_eligible = false;
5183
5184	if (!group_pa_eligible) {
5185		if (inode_pa_eligible)
5186			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5187		else
5188			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5189		return;
5190	}
5191
5192	BUG_ON(ac->ac_lg != NULL);
5193	/*
5194	 * locality group prealloc space is per-CPU. The reason for having
5195	 * a per-CPU locality group is to reduce contention between block
5196	 * requests from multiple CPUs.
5197	 */
5198	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5199
5200	/* we're going to use group allocation */
5201	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5202
5203	/* serialize all allocations in the group */
5204	mutex_lock(&ac->ac_lg->lg_mutex);
5205}
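
/*
 * Illustrative example of the policy above (hypothetical threshold): with
 * mb_stream_req set to 16 blocks, a file whose resulting size is 10 blocks
 * stays eligible for per-CPU locality group preallocation, while one growing
 * to 200 blocks has group allocation disabled and, if still open for write,
 * is marked EXT4_MB_STREAM_ALLOC for inode preallocation instead.
 */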
5206
5207static noinline_for_stack void
5208ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5209				struct ext4_allocation_request *ar)
5210{
5211	struct super_block *sb = ar->inode->i_sb;
5212	struct ext4_sb_info *sbi = EXT4_SB(sb);
5213	struct ext4_super_block *es = sbi->s_es;
5214	ext4_group_t group;
5215	unsigned int len;
5216	ext4_fsblk_t goal;
5217	ext4_grpblk_t block;
5218
5219	/* we can't allocate > group size */
5220	len = ar->len;
5221
5222	/* just a dirty hack to filter too big requests  */
5223	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5224		len = EXT4_CLUSTERS_PER_GROUP(sb);
5225
5226	/* start searching from the goal */
5227	goal = ar->goal;
5228	if (goal < le32_to_cpu(es->s_first_data_block) ||
5229			goal >= ext4_blocks_count(es))
5230		goal = le32_to_cpu(es->s_first_data_block);
5231	ext4_get_group_no_and_offset(sb, goal, &group, &block);
5232
5233	/* set up allocation goals */
5234	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5235	ac->ac_status = AC_STATUS_CONTINUE;
5236	ac->ac_sb = sb;
5237	ac->ac_inode = ar->inode;
5238	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5239	ac->ac_o_ex.fe_group = group;
5240	ac->ac_o_ex.fe_start = block;
5241	ac->ac_o_ex.fe_len = len;
5242	ac->ac_g_ex = ac->ac_o_ex;
5243	ac->ac_flags = ar->flags;
5244
5245	/* we have to define context: we'll work with a file or
5246	 * locality group. this is a policy, actually */
5247	ext4_mb_group_or_file(ac);
5248
5249	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5250			"left: %u/%u, right %u/%u to %swritable\n",
5251			(unsigned) ar->len, (unsigned) ar->logical,
5252			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5253			(unsigned) ar->lleft, (unsigned) ar->pleft,
5254			(unsigned) ar->lright, (unsigned) ar->pright,
5255			inode_is_open_for_write(ar->inode) ? "" : "non-");
5256}
5257
5258static noinline_for_stack void
5259ext4_mb_discard_lg_preallocations(struct super_block *sb,
5260					struct ext4_locality_group *lg,
5261					int order, int total_entries)
5262{
5263	ext4_group_t group = 0;
5264	struct ext4_buddy e4b;
5265	struct list_head discard_list;
5266	struct ext4_prealloc_space *pa, *tmp;
5267
5268	mb_debug(sb, "discard locality group preallocation\n");
5269
5270	INIT_LIST_HEAD(&discard_list);
5271
5272	spin_lock(&lg->lg_prealloc_lock);
5273	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5274				pa_inode_list,
5275				lockdep_is_held(&lg->lg_prealloc_lock)) {
5276		spin_lock(&pa->pa_lock);
5277		if (atomic_read(&pa->pa_count)) {
5278			/*
5279			 * This is the pa that we just used
5280			 * for block allocation. So don't
5281			 * free that
5282			 */
5283			spin_unlock(&pa->pa_lock);
5284			continue;
5285		}
5286		if (pa->pa_deleted) {
5287			spin_unlock(&pa->pa_lock);
5288			continue;
5289		}
5290		/* only lg prealloc space */
5291		BUG_ON(pa->pa_type != MB_GROUP_PA);
5292
5293		/* seems this one can be freed ... */
5294		ext4_mb_mark_pa_deleted(sb, pa);
5295		spin_unlock(&pa->pa_lock);
5296
5297		list_del_rcu(&pa->pa_inode_list);
5298		list_add(&pa->u.pa_tmp_list, &discard_list);
5299
5300		total_entries--;
5301		if (total_entries <= 5) {
5302			/*
5303			 * we want to keep only 5 entries,
5304			 * allowing it to grow to 8. This
5305			 * makes sure we don't call discard
5306			 * again soon for this list.
5307			 */
5308			break;
5309		}
5310	}
5311	spin_unlock(&lg->lg_prealloc_lock);
5312
5313	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5314		int err;
5315
5316		group = ext4_get_group_number(sb, pa->pa_pstart);
5317		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5318					     GFP_NOFS|__GFP_NOFAIL);
5319		if (err) {
5320			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5321				       err, group);
5322			continue;
5323		}
5324		ext4_lock_group(sb, group);
5325		list_del(&pa->pa_group_list);
5326		ext4_mb_release_group_pa(&e4b, pa);
5327		ext4_unlock_group(sb, group);
5328
5329		ext4_mb_unload_buddy(&e4b);
5330		list_del(&pa->u.pa_tmp_list);
5331		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5332	}
5333}
5334
5335/*
5336 * We have incremented pa_count. So it cannot be freed at this
5337 * point. Also we hold lg_mutex. So no parallel allocation is
5338 * possible from this lg. That means pa_free cannot be updated.
5339 *
5340 * A parallel ext4_mb_discard_group_preallocations() is still possible,
5341 * which can cause the lg_prealloc_list to be updated.
5342 */
5343
5344static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5345{
5346	int order, added = 0, lg_prealloc_count = 1;
5347	struct super_block *sb = ac->ac_sb;
5348	struct ext4_locality_group *lg = ac->ac_lg;
5349	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5350
5351	order = fls(pa->pa_free) - 1;
5352	if (order > PREALLOC_TB_SIZE - 1)
5353		/* The max size of hash table is PREALLOC_TB_SIZE */
5354		order = PREALLOC_TB_SIZE - 1;
5355	/* Add the prealloc space to lg */
5356	spin_lock(&lg->lg_prealloc_lock);
5357	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5358				pa_inode_list,
5359				lockdep_is_held(&lg->lg_prealloc_lock)) {
5360		spin_lock(&tmp_pa->pa_lock);
5361		if (tmp_pa->pa_deleted) {
5362			spin_unlock(&tmp_pa->pa_lock);
5363			continue;
5364		}
5365		if (!added && pa->pa_free < tmp_pa->pa_free) {
5366			/* Add to the tail of the previous entry */
5367			list_add_tail_rcu(&pa->pa_inode_list,
5368						&tmp_pa->pa_inode_list);
5369			added = 1;
5370			/*
5371			 * we want to count the total
5372			 * number of entries in the list
5373			 */
5374		}
5375		spin_unlock(&tmp_pa->pa_lock);
5376		lg_prealloc_count++;
5377	}
5378	if (!added)
5379		list_add_tail_rcu(&pa->pa_inode_list,
5380					&lg->lg_prealloc_list[order]);
5381	spin_unlock(&lg->lg_prealloc_lock);
5382
5383	/* Now trim the list so it has no more than 8 elements */
5384	if (lg_prealloc_count > 8) {
5385		ext4_mb_discard_lg_preallocations(sb, lg,
5386						  order, lg_prealloc_count);
5387		return;
5388	}
5389	return;
5390}
5391
5392/*
5393 * If the per-inode prealloc list is too long, trim some PAs.
5394 */
5395static void ext4_mb_trim_inode_pa(struct inode *inode)
5396{
5397	struct ext4_inode_info *ei = EXT4_I(inode);
5398	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5399	int count, delta;
5400
5401	count = atomic_read(&ei->i_prealloc_active);
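	/*
	 * Allow the list to overshoot the limit by roughly a quarter
	 * (delta) before trimming it back down to the limit, so we do
	 * not discard on every allocation.
	 */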
5402	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
5403	if (count > sbi->s_mb_max_inode_prealloc + delta) {
5404		count -= sbi->s_mb_max_inode_prealloc;
5405		ext4_discard_preallocations(inode, count);
5406	}
5407}
5408
5409/*
5410 * release all resources we used in the allocation
5411 */
5412static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5413{
5414	struct inode *inode = ac->ac_inode;
5415	struct ext4_inode_info *ei = EXT4_I(inode);
5416	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5417	struct ext4_prealloc_space *pa = ac->ac_pa;
5418	if (pa) {
5419		if (pa->pa_type == MB_GROUP_PA) {
5420			/* see comment in ext4_mb_use_group_pa() */
5421			spin_lock(&pa->pa_lock);
5422			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5423			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5424			pa->pa_free -= ac->ac_b_ex.fe_len;
5425			pa->pa_len -= ac->ac_b_ex.fe_len;
5426			spin_unlock(&pa->pa_lock);
5427
5428			/*
5429			 * We want to add the pa to the right bucket.
5430			 * Remove it from the list and while adding
5431			 * make sure the list to which we are adding
5432			 * doesn't grow too big.
5433			 */
5434			if (likely(pa->pa_free)) {
5435				spin_lock(pa->pa_obj_lock);
5436				list_del_rcu(&pa->pa_inode_list);
5437				spin_unlock(pa->pa_obj_lock);
5438				ext4_mb_add_n_trim(ac);
5439			}
5440		}
5441
5442		if (pa->pa_type == MB_INODE_PA) {
5443			/*
5444			 * Treat the per-inode prealloc list as an LRU list, then
5445			 * try to trim the least recently used PAs.
5446			 */
5447			spin_lock(pa->pa_obj_lock);
5448			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5449			spin_unlock(pa->pa_obj_lock);
 
5450		}
5451
5452		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5453	}
5454	if (ac->ac_bitmap_page)
5455		put_page(ac->ac_bitmap_page);
5456	if (ac->ac_buddy_page)
5457		put_page(ac->ac_buddy_page);
5458	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5459		mutex_unlock(&ac->ac_lg->lg_mutex);
5460	ext4_mb_collect_stats(ac);
5461	ext4_mb_trim_inode_pa(inode);
5462	return 0;
5463}
5464
5465static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5466{
5467	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5468	int ret;
5469	int freed = 0, busy = 0;
5470	int retry = 0;
5471
5472	trace_ext4_mb_discard_preallocations(sb, needed);
5473
5474	if (needed == 0)
5475		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5476 repeat:
5477	for (i = 0; i < ngroups && needed > 0; i++) {
5478		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5479		freed += ret;
5480		needed -= ret;
5481		cond_resched();
5482	}
5483
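	/*
	 * Some PAs may have been skipped because they were still in use
	 * (busy); give their users a chance to drop them and rescan a
	 * couple of times.
	 */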
5484	if (needed > 0 && busy && ++retry < 3) {
5485		busy = 0;
5486		goto repeat;
5487	}
5488
5489	return freed;
5490}
5491
5492static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5493			struct ext4_allocation_context *ac, u64 *seq)
5494{
5495	int freed;
5496	u64 seq_retry = 0;
5497	bool ret = false;
5498
5499	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5500	if (freed) {
5501		ret = true;
5502		goto out_dbg;
5503	}
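	/*
	 * Nothing was freed here: still retry once with strict checking
	 * enabled, or when the discard sequence has advanced, meaning
	 * preallocations were discarded concurrently elsewhere.
	 */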
5504	seq_retry = ext4_get_discard_pa_seq_sum();
5505	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5506		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5507		*seq = seq_retry;
5508		ret = true;
5509	}
5510
5511out_dbg:
5512	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5513	return ret;
5514}
5515
5516static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5517				struct ext4_allocation_request *ar, int *errp);
5518
5519/*
5520 * Main entry point into mballoc to allocate blocks
5521 * it tries to use preallocation first, then falls back
5522 * to usual allocation
5523 */
5524ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5525				struct ext4_allocation_request *ar, int *errp)
5526{
 
5527	struct ext4_allocation_context *ac = NULL;
5528	struct ext4_sb_info *sbi;
5529	struct super_block *sb;
5530	ext4_fsblk_t block = 0;
5531	unsigned int inquota = 0;
5532	unsigned int reserv_clstrs = 0;
5533	int retries = 0;
5534	u64 seq;
5535
5536	might_sleep();
5537	sb = ar->inode->i_sb;
5538	sbi = EXT4_SB(sb);
5539
5540	trace_ext4_request_blocks(ar);
5541	if (sbi->s_mount_state & EXT4_FC_REPLAY)
5542		return ext4_mb_new_blocks_simple(handle, ar, errp);
5543
5544	/* Allow to use superuser reservation for quota file */
5545	if (ext4_is_quota_file(ar->inode))
5546		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5547
5548	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5549		/* Without delayed allocation we need to verify
5550		 * there are enough free blocks to do the block allocation
5551		 * and that the allocation doesn't exceed the quota limits.
5552		 */
5553		while (ar->len &&
5554			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5555
5556			/* let others free the space */
5557			cond_resched();
5558			ar->len = ar->len >> 1;
5559		}
5560		if (!ar->len) {
5561			ext4_mb_show_pa(sb);
5562			*errp = -ENOSPC;
5563			return 0;
5564		}
5565		reserv_clstrs = ar->len;
5566		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5567			dquot_alloc_block_nofail(ar->inode,
5568						 EXT4_C2B(sbi, ar->len));
5569		} else {
5570			while (ar->len &&
5571				dquot_alloc_block(ar->inode,
5572						  EXT4_C2B(sbi, ar->len))) {
5573
5574				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5575				ar->len--;
5576			}
5577		}
5578		inquota = ar->len;
5579		if (ar->len == 0) {
5580			*errp = -EDQUOT;
5581			goto out;
5582		}
5583	}
5584
5585	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5586	if (!ac) {
5587		ar->len = 0;
5588		*errp = -ENOMEM;
5589		goto out;
5590	}
5591
5592	ext4_mb_initialize_context(ac, ar);
5593
5594	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
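	/*
	 * Sample the discard sequence now so that, if allocation fails,
	 * we can tell whether preallocations were discarded concurrently
	 * and a retry is worthwhile.
	 */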
5595	seq = this_cpu_read(discard_pa_seq);
5596	if (!ext4_mb_use_preallocated(ac)) {
5597		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5598		ext4_mb_normalize_request(ac, ar);
5599
5600		*errp = ext4_mb_pa_alloc(ac);
5601		if (*errp)
5602			goto errout;
5603repeat:
5604		/* allocate space in core */
5605		*errp = ext4_mb_regular_allocator(ac);
5606		/*
5607		 * The pa allocated above is added to grp->bb_prealloc_list only
5608		 * when we were able to allocate some blocks, i.e. when
5609		 * ac->ac_status == AC_STATUS_FOUND.
5610		 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5611		 * so we have to free this pa here itself.
5612		 */
5613		if (*errp) {
5614			ext4_mb_pa_free(ac);
5615			ext4_discard_allocated_blocks(ac);
5616			goto errout;
5617		}
5618		if (ac->ac_status == AC_STATUS_FOUND &&
5619			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5620			ext4_mb_pa_free(ac);
5621	}
5622	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5623		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5624		if (*errp) {
5625			ext4_discard_allocated_blocks(ac);
5626			goto errout;
5627		} else {
5628			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5629			ar->len = ac->ac_b_ex.fe_len;
5630		}
5631	} else {
5632		if (++retries < 3 &&
5633		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5634			goto repeat;
5635		/*
5636		 * If block allocation fails then the pa allocated above
5637		 * needs to be freed here itself.
5638		 */
5639		ext4_mb_pa_free(ac);
5640		*errp = -ENOSPC;
5641	}
5642
5643errout:
5644	if (*errp) {
5645		ac->ac_b_ex.fe_len = 0;
5646		ar->len = 0;
5647		ext4_mb_show_ac(ac);
5648	}
5649	ext4_mb_release_context(ac);
5650out:
5651	if (ac)
5652		kmem_cache_free(ext4_ac_cachep, ac);
5653	if (inquota && ar->len < inquota)
5654		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5655	if (!ar->len) {
5656		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
 
5657			/* release all the reserved blocks if non delalloc */
5658			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5659						reserv_clstrs);
5660	}
5661
5662	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5663
5664	return block;
5665}
5666
5667/*
5668 * We can merge two free data extents only if the physical blocks
5669 * are contiguous, AND the extents were freed by the same transaction,
5670 * AND the blocks are associated with the same group.
5671 */
5672static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5673					struct ext4_free_data *entry,
5674					struct ext4_free_data *new_entry,
5675					struct rb_root *entry_rb_root)
5676{
5677	if ((entry->efd_tid != new_entry->efd_tid) ||
5678	    (entry->efd_group != new_entry->efd_group))
5679		return;
5680	if (entry->efd_start_cluster + entry->efd_count ==
5681	    new_entry->efd_start_cluster) {
5682		new_entry->efd_start_cluster = entry->efd_start_cluster;
5683		new_entry->efd_count += entry->efd_count;
5684	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5685		   entry->efd_start_cluster) {
5686		new_entry->efd_count += entry->efd_count;
5687	} else
5688		return;
5689	spin_lock(&sbi->s_md_lock);
5690	list_del(&entry->efd_list);
5691	spin_unlock(&sbi->s_md_lock);
5692	rb_erase(&entry->efd_node, entry_rb_root);
5693	kmem_cache_free(ext4_free_data_cachep, entry);
5694}
5695
5696static noinline_for_stack int
5697ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5698		      struct ext4_free_data *new_entry)
5699{
5700	ext4_group_t group = e4b->bd_group;
5701	ext4_grpblk_t cluster;
5702	ext4_grpblk_t clusters = new_entry->efd_count;
5703	struct ext4_free_data *entry;
5704	struct ext4_group_info *db = e4b->bd_info;
5705	struct super_block *sb = e4b->bd_sb;
5706	struct ext4_sb_info *sbi = EXT4_SB(sb);
5707	struct rb_node **n = &db->bb_free_root.rb_node, *node;
5708	struct rb_node *parent = NULL, *new_node;
5709
5710	BUG_ON(!ext4_handle_valid(handle));
5711	BUG_ON(e4b->bd_bitmap_page == NULL);
5712	BUG_ON(e4b->bd_buddy_page == NULL);
5713
5714	new_node = &new_entry->efd_node;
5715	cluster = new_entry->efd_start_cluster;
5716
5717	if (!*n) {
5718		/* first free block extent. We need to
5719		 * protect the buddy cache from being freed,
5720		 * otherwise we'll refresh it from the
5721		 * on-disk bitmap and lose not-yet-available
5722		 * blocks */
5723		get_page(e4b->bd_buddy_page);
5724		get_page(e4b->bd_bitmap_page);
5725	}
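	/*
	 * Walk the rbtree (ordered by start cluster) to find the
	 * insertion point; overlapping an existing entry means these
	 * blocks are already queued to be freed.
	 */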
5726	while (*n) {
5727		parent = *n;
5728		entry = rb_entry(parent, struct ext4_free_data, efd_node);
5729		if (cluster < entry->efd_start_cluster)
5730			n = &(*n)->rb_left;
5731		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5732			n = &(*n)->rb_right;
5733		else {
5734			ext4_grp_locked_error(sb, group, 0,
5735				ext4_group_first_block_no(sb, group) +
5736				EXT4_C2B(sbi, cluster),
5737				"Block already on to-be-freed list");
5738			kmem_cache_free(ext4_free_data_cachep, new_entry);
5739			return 0;
5740		}
5741	}
5742
5743	rb_link_node(new_node, parent, n);
5744	rb_insert_color(new_node, &db->bb_free_root);
5745
5746	/* Now try to see if the extent can be merged to the left and right */
5747	node = rb_prev(new_node);
5748	if (node) {
5749		entry = rb_entry(node, struct ext4_free_data, efd_node);
5750		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5751					    &(db->bb_free_root));
5752	}
5753
5754	node = rb_next(new_node);
5755	if (node) {
5756		entry = rb_entry(node, struct ext4_free_data, efd_node);
5757		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5758					    &(db->bb_free_root));
5759	}
5760
5761	spin_lock(&sbi->s_md_lock);
5762	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5763	sbi->s_mb_free_pending += clusters;
5764	spin_unlock(&sbi->s_md_lock);
5765	return 0;
5766}
5767
5768/*
5769 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5770 * linearly starting at the goal block and also excludes the blocks which
5771 * are going to be in use after fast commit replay.
5772 */
5773static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5774				struct ext4_allocation_request *ar, int *errp)
5775{
5776	struct buffer_head *bitmap_bh;
5777	struct super_block *sb = ar->inode->i_sb;
5778	ext4_group_t group;
5779	ext4_grpblk_t blkoff;
5780	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5781	ext4_grpblk_t i = 0;
5782	ext4_fsblk_t goal, block;
5783	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5784
5785	goal = ar->goal;
5786	if (goal < le32_to_cpu(es->s_first_data_block) ||
5787			goal >= ext4_blocks_count(es))
5788		goal = le32_to_cpu(es->s_first_data_block);
5789
5790	ar->len = 0;
5791	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5792	for (; group < ext4_get_groups_count(sb); group++) {
5793		bitmap_bh = ext4_read_block_bitmap(sb, group);
5794		if (IS_ERR(bitmap_bh)) {
5795			*errp = PTR_ERR(bitmap_bh);
5796			pr_warn("Failed to read block bitmap\n");
5797			return 0;
5798		}
5799
5800		ext4_get_group_no_and_offset(sb,
5801			max(ext4_group_first_block_no(sb, group), goal),
5802			NULL, &blkoff);
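		/*
		 * Find the next free bit in this group's bitmap, skipping
		 * blocks that fast commit replay has marked as excluded
		 * (they will be in use once replay finishes).
		 */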
5803		while (1) {
5804			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5805						blkoff);
5806			if (i >= max)
5807				break;
5808			if (ext4_fc_replay_check_excluded(sb,
5809				ext4_group_first_block_no(sb, group) + i)) {
5810				blkoff = i + 1;
5811			} else
5812				break;
5813		}
5814		brelse(bitmap_bh);
5815		if (i < max)
5816			break;
5817	}
5818
5819	if (group >= ext4_get_groups_count(sb) || i >= max) {
5820		*errp = -ENOSPC;
5821		return 0;
5822	}
5823
5824	block = ext4_group_first_block_no(sb, group) + i;
5825	ext4_mb_mark_bb(sb, block, 1, 1);
5826	ar->len = 1;
5827
5828	return block;
5829}
5830
5831static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5832					unsigned long count)
5833{
5834	struct buffer_head *bitmap_bh;
5835	struct super_block *sb = inode->i_sb;
5836	struct ext4_group_desc *gdp;
5837	struct buffer_head *gdp_bh;
5838	ext4_group_t group;
5839	ext4_grpblk_t blkoff;
5840	int already_freed = 0, err, i;
5841
5842	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5843	bitmap_bh = ext4_read_block_bitmap(sb, group);
5844	if (IS_ERR(bitmap_bh)) {
5845		err = PTR_ERR(bitmap_bh);
5846		pr_warn("Failed to read block bitmap\n");
5847		return;
5848	}
5849	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5850	if (!gdp)
5851		return;
5852
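	/*
	 * Count bits that are already clear so that the group's free
	 * cluster count is not bumped for blocks that were never marked
	 * in use.
	 */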
5853	for (i = 0; i < count; i++) {
5854		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5855			already_freed++;
5856	}
5857	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5858	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5859	if (err)
5860		return;
5861	ext4_free_group_clusters_set(
5862		sb, gdp, ext4_free_group_clusters(sb, gdp) +
5863		count - already_freed);
5864	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5865	ext4_group_desc_csum_set(sb, group, gdp);
5866	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5867	sync_dirty_buffer(bitmap_bh);
5868	sync_dirty_buffer(gdp_bh);
5869	brelse(bitmap_bh);
5870}
5871
5872/**
5873 * ext4_mb_clear_bb() -- helper function for freeing blocks.
5874 *			Used by ext4_free_blocks()
5875 * @handle:		handle for this transaction
5876 * @inode:		inode
5877 * @block:		starting physical block to be freed
5878 * @count:		number of blocks to be freed
5879 * @flags:		flags used by ext4_free_blocks
5880 */
5881static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5882			       ext4_fsblk_t block, unsigned long count,
5883			       int flags)
5884{
5885	struct buffer_head *bitmap_bh = NULL;
5886	struct super_block *sb = inode->i_sb;
5887	struct ext4_group_desc *gdp;
 
5888	unsigned int overflow;
5889	ext4_grpblk_t bit;
5890	struct buffer_head *gd_bh;
5891	ext4_group_t block_group;
5892	struct ext4_sb_info *sbi;
5893	struct ext4_buddy e4b;
5894	unsigned int count_clusters;
5895	int err = 0;
5896	int ret;
5897
5898	sbi = EXT4_SB(sb);
5899
5900	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5901	    !ext4_inode_block_valid(inode, block, count)) {
5902		ext4_error(sb, "Freeing blocks in system zone - "
5903			   "Block = %llu, count = %lu", block, count);
5904		/* err = 0. ext4_std_error should be a no op */
5905		goto error_return;
5906	}
5907	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5908
5909do_more:
5910	overflow = 0;
5911	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5912
5913	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5914			ext4_get_group_info(sb, block_group))))
5915		return;
5916
5917	/*
5918	 * Check to see if we are freeing blocks across a group
5919	 * boundary.
5920	 */
5921	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5922		overflow = EXT4_C2B(sbi, bit) + count -
5923			EXT4_BLOCKS_PER_GROUP(sb);
5924		count -= overflow;
5925		/* The range changed so it's no longer validated */
5926		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5927	}
5928	count_clusters = EXT4_NUM_B2C(sbi, count);
5929	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5930	if (IS_ERR(bitmap_bh)) {
5931		err = PTR_ERR(bitmap_bh);
5932		bitmap_bh = NULL;
5933		goto error_return;
5934	}
5935	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5936	if (!gdp) {
5937		err = -EIO;
5938		goto error_return;
5939	}
5940
5941	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5942	    !ext4_inode_block_valid(inode, block, count)) {
5943		ext4_error(sb, "Freeing blocks in system zone - "
5944			   "Block = %llu, count = %lu", block, count);
5945		/* err = 0. ext4_std_error should be a no op */
5946		goto error_return;
5947	}
5948
5949	BUFFER_TRACE(bitmap_bh, "getting write access");
5950	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
5951					    EXT4_JTR_NONE);
5952	if (err)
5953		goto error_return;
5954
5955	/*
5956	 * We are about to modify some metadata.  Call the journal APIs
5957	 * to unshare ->b_data if a currently-committing transaction is
5958	 * using it
5959	 */
5960	BUFFER_TRACE(gd_bh, "get_write_access");
5961	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
5962	if (err)
5963		goto error_return;
5964#ifdef AGGRESSIVE_CHECK
5965	{
5966		int i;
5967		for (i = 0; i < count_clusters; i++)
5968			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5969	}
5970#endif
5971	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5972
5973	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5974	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5975				     GFP_NOFS|__GFP_NOFAIL);
5976	if (err)
5977		goto error_return;
5978
5979	/*
5980	 * We need to make sure we don't reuse the freed block until after the
5981	 * transaction is committed. We make an exception if the inode is to be
5982	 * written in writeback mode since writeback mode has weak data
5983	 * consistency guarantees.
5984	 */
5985	if (ext4_handle_valid(handle) &&
5986	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5987	     !ext4_should_writeback_data(inode))) {
5988		struct ext4_free_data *new_entry;
5989		/*
5990		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5991		 * to fail.
5992		 */
5993		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5994				GFP_NOFS|__GFP_NOFAIL);
5995		new_entry->efd_start_cluster = bit;
5996		new_entry->efd_group = block_group;
5997		new_entry->efd_count = count_clusters;
5998		new_entry->efd_tid = handle->h_transaction->t_tid;
5999
6000		ext4_lock_group(sb, block_group);
6001		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6002		ext4_mb_free_metadata(handle, &e4b, new_entry);
6003	} else {
6004		/* We need to update group_info->bb_free and the bitmap
6005		 * with the group lock held, because generate_buddy looks
6006		 * at them with the group lock held.
6007		 */
6008		if (test_opt(sb, DISCARD)) {
6009			err = ext4_issue_discard(sb, block_group, bit, count,
6010						 NULL);
6011			if (err && err != -EOPNOTSUPP)
6012				ext4_msg(sb, KERN_WARNING, "discard request in"
6013					 " group:%u block:%d count:%lu failed"
6014					 " with %d", block_group, bit, count,
6015					 err);
6016		} else
6017			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6018
6019		ext4_lock_group(sb, block_group);
6020		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6021		mb_free_blocks(inode, &e4b, bit, count_clusters);
6022	}
6023
6024	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6025	ext4_free_group_clusters_set(sb, gdp, ret);
6026	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6027	ext4_group_desc_csum_set(sb, block_group, gdp);
6028	ext4_unlock_group(sb, block_group);
 
6029
6030	if (sbi->s_log_groups_per_flex) {
6031		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6032		atomic64_add(count_clusters,
6033			     &sbi_array_rcu_deref(sbi, s_flex_groups,
6034						  flex_group)->free_clusters);
6035	}
6036
6037	/*
6038	 * on a bigalloc file system, defer the s_freeclusters_counter
6039	 * update to the caller (ext4_remove_space and friends) so they
6040	 * can determine if a cluster freed here should be rereserved
6041	 */
6042	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6043		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6044			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6045		percpu_counter_add(&sbi->s_freeclusters_counter,
6046				   count_clusters);
6047	}
6048
6049	ext4_mb_unload_buddy(&e4b);
6050
6051	/* We dirtied the bitmap block */
6052	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6053	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6054
6055	/* And the group descriptor block */
6056	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6057	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6058	if (!err)
6059		err = ret;
6060
6061	if (overflow && !err) {
6062		block += count;
6063		count = overflow;
6064		put_bh(bitmap_bh);
6065		/* The range changed so it's no longer validated */
6066		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6067		goto do_more;
6068	}
 
6069error_return:
6070	brelse(bitmap_bh);
6071	ext4_std_error(sb, err);
6072	return;
6073}
6074
6075/**
6076 * ext4_free_blocks() -- Free given blocks and update quota
6077 * @handle:		handle for this transaction
6078 * @inode:		inode
6079 * @bh:			optional buffer of the block to be freed
6080 * @block:		starting physical block to be freed
6081 * @count:		number of blocks to be freed
6082 * @flags:		flags used by ext4_free_blocks
6083 */
6084void ext4_free_blocks(handle_t *handle, struct inode *inode,
6085		      struct buffer_head *bh, ext4_fsblk_t block,
6086		      unsigned long count, int flags)
6087{
6088	struct super_block *sb = inode->i_sb;
6089	unsigned int overflow;
6090	struct ext4_sb_info *sbi;
6091
6092	sbi = EXT4_SB(sb);
6093
6094	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6095		ext4_free_blocks_simple(inode, block, count);
6096		return;
6097	}
6098
6099	might_sleep();
6100	if (bh) {
6101		if (block)
6102			BUG_ON(block != bh->b_blocknr);
6103		else
6104			block = bh->b_blocknr;
6105	}
6106
6107	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6108	    !ext4_inode_block_valid(inode, block, count)) {
6109		ext4_error(sb, "Freeing blocks not in datazone - "
6110			   "block = %llu, count = %lu", block, count);
6111		return;
6112	}
6113	flags |= EXT4_FREE_BLOCKS_VALIDATED;
6114
6115	ext4_debug("freeing block %llu\n", block);
6116	trace_ext4_free_blocks(inode, block, count, flags);
6117
6118	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6119		BUG_ON(count > 1);
6120
6121		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6122			    inode, bh, block);
6123	}
6124
6125	/*
6126	 * If the extent to be freed does not begin on a cluster
6127	 * boundary, we need to deal with partial clusters at the
6128	 * beginning and end of the extent.  Normally we will free
6129	 * blocks at the beginning or the end unless we are explicitly
6130	 * requested to avoid doing so.
6131	 */
6132	overflow = EXT4_PBLK_COFF(sbi, block);
6133	if (overflow) {
6134		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6135			overflow = sbi->s_cluster_ratio - overflow;
6136			block += overflow;
6137			if (count > overflow)
6138				count -= overflow;
6139			else
6140				return;
6141		} else {
6142			block -= overflow;
6143			count += overflow;
6144		}
6145		/* The range changed so it's no longer validated */
6146		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6147	}
6148	overflow = EXT4_LBLK_COFF(sbi, count);
6149	if (overflow) {
6150		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6151			if (count > overflow)
6152				count -= overflow;
6153			else
6154				return;
6155		} else
6156			count += sbi->s_cluster_ratio - overflow;
6157		/* The range changed so it's no longer validated */
6158		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6159	}
6160
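	/*
	 * The caller asked us to forget the blocks but did not hand us a
	 * buffer head: for metadata blocks, look up any cached buffer so
	 * each block can be forgotten individually.
	 */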
6161	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6162		int i;
6163		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6164
6165		for (i = 0; i < count; i++) {
6166			cond_resched();
6167			if (is_metadata)
6168				bh = sb_find_get_block(inode->i_sb, block + i);
6169			ext4_forget(handle, is_metadata, inode, bh, block + i);
6170		}
6171	}
6172
6173	ext4_mb_clear_bb(handle, inode, block, count, flags);
6174	return;
6175}
6176
6177/**
6178 * ext4_group_add_blocks() -- Add given blocks to an existing group
6179 * @handle:			handle to this transaction
6180 * @sb:				super block
6181 * @block:			start physical block to add to the block group
6182 * @count:			number of blocks to free
6183 *
6184 * This marks the blocks as free in the bitmap and buddy.
6185 */
6186int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6187			 ext4_fsblk_t block, unsigned long count)
6188{
6189	struct buffer_head *bitmap_bh = NULL;
6190	struct buffer_head *gd_bh;
6191	ext4_group_t block_group;
6192	ext4_grpblk_t bit;
6193	unsigned int i;
6194	struct ext4_group_desc *desc;
6195	struct ext4_sb_info *sbi = EXT4_SB(sb);
6196	struct ext4_buddy e4b;
6197	int err = 0, ret, free_clusters_count;
6198	ext4_grpblk_t clusters_freed;
6199	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6200	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6201	unsigned long cluster_count = last_cluster - first_cluster + 1;
6202
6203	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6204
6205	if (count == 0)
6206		return 0;
6207
6208	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6209	/*
6210	 * Check to see if we are adding blocks across a group
6211	 * boundary.
6212	 */
6213	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6214		ext4_warning(sb, "too many blocks added to group %u",
6215			     block_group);
6216		err = -EINVAL;
6217		goto error_return;
6218	}
6219
6220	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6221	if (IS_ERR(bitmap_bh)) {
6222		err = PTR_ERR(bitmap_bh);
6223		bitmap_bh = NULL;
6224		goto error_return;
6225	}
6226
6227	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6228	if (!desc) {
6229		err = -EIO;
6230		goto error_return;
6231	}
6232
6233	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6234		ext4_error(sb, "Adding blocks in system zones - "
6235			   "Block = %llu, count = %lu",
6236			   block, count);
6237		err = -EINVAL;
6238		goto error_return;
6239	}
6240
6241	BUFFER_TRACE(bitmap_bh, "getting write access");
6242	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6243					    EXT4_JTR_NONE);
6244	if (err)
6245		goto error_return;
6246
6247	/*
6248	 * We are about to modify some metadata.  Call the journal APIs
6249	 * to unshare ->b_data if a currently-committing transaction is
6250	 * using it
6251	 */
6252	BUFFER_TRACE(gd_bh, "get_write_access");
6253	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6254	if (err)
6255		goto error_return;
6256
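	/*
	 * Count the clusters that are actually marked in use; bits that
	 * are already clear are reported but tolerated so the free count
	 * below is adjusted correctly.
	 */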
6257	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6258		BUFFER_TRACE(bitmap_bh, "clear bit");
6259		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6260			ext4_error(sb, "bit already cleared for block %llu",
6261				   (ext4_fsblk_t)(block + i));
6262			BUFFER_TRACE(bitmap_bh, "bit already cleared");
6263		} else {
6264			clusters_freed++;
6265		}
6266	}
6267
6268	err = ext4_mb_load_buddy(sb, block_group, &e4b);
6269	if (err)
6270		goto error_return;
6271
6272	/*
6273	 * We need to update group_info->bb_free and the bitmap
6274	 * with the group lock held, because generate_buddy looks
6275	 * at them with the group lock held.
6276	 */
6277	ext4_lock_group(sb, block_group);
6278	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6279	mb_free_blocks(NULL, &e4b, bit, cluster_count);
6280	free_clusters_count = clusters_freed +
6281		ext4_free_group_clusters(sb, desc);
6282	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6283	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6284	ext4_group_desc_csum_set(sb, block_group, desc);
6285	ext4_unlock_group(sb, block_group);
6286	percpu_counter_add(&sbi->s_freeclusters_counter,
6287			   clusters_freed);
6288
6289	if (sbi->s_log_groups_per_flex) {
6290		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6291		atomic64_add(clusters_freed,
6292			     &sbi_array_rcu_deref(sbi, s_flex_groups,
6293						  flex_group)->free_clusters);
6294	}
6295
6296	ext4_mb_unload_buddy(&e4b);
6297
6298	/* We dirtied the bitmap block */
6299	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6300	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6301
6302	/* And the group descriptor block */
6303	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6304	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6305	if (!err)
6306		err = ret;
6307
6308error_return:
6309	brelse(bitmap_bh);
6310	ext4_std_error(sb, err);
6311	return err;
6312}
6313
6314/**
6315 * ext4_trim_extent -- function to TRIM one single free extent in the group
6316 * @sb:		super block for the file system
6317 * @start:	starting block of the free extent in the alloc. group
6318 * @count:	number of blocks to TRIM
6319 * @e4b:	ext4 buddy for the group
6320 *
6321 * Trim "count" blocks starting at "start" in the "group". To ensure that no
6322 * one will allocate those blocks, mark them as used in the buddy bitmap. This
6323 * must be called with the group lock held.
6324 */
6325static int ext4_trim_extent(struct super_block *sb,
6326		int start, int count, struct ext4_buddy *e4b)
6327__releases(bitlock)
6328__acquires(bitlock)
6329{
6330	struct ext4_free_extent ex;
6331	ext4_group_t group = e4b->bd_group;
6332	int ret = 0;
6333
6334	trace_ext4_trim_extent(sb, group, start, count);
6335
6336	assert_spin_locked(ext4_group_lock_ptr(sb, group));
6337
6338	ex.fe_start = start;
6339	ex.fe_group = group;
6340	ex.fe_len = count;
6341
6342	/*
6343	 * Mark blocks used, so no one can reuse them while
6344	 * being trimmed.
6345	 */
6346	mb_mark_used(e4b, &ex);
6347	ext4_unlock_group(sb, group);
6348	ret = ext4_issue_discard(sb, group, start, count, NULL);
6349	ext4_lock_group(sb, group);
6350	mb_free_blocks(NULL, e4b, start, ex.fe_len);
6351	return ret;
6352}
6353
6354static int ext4_try_to_trim_range(struct super_block *sb,
6355		struct ext4_buddy *e4b, ext4_grpblk_t start,
6356		ext4_grpblk_t max, ext4_grpblk_t minblocks)
6357__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6358__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6359{
6360	ext4_grpblk_t next, count, free_count;
6361	void *bitmap;
6362
6363	bitmap = e4b->bd_bitmap;
6364	start = (e4b->bd_info->bb_first_free > start) ?
6365		e4b->bd_info->bb_first_free : start;
6366	count = 0;
6367	free_count = 0;
6368
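	/*
	 * Scan runs of free clusters and discard each run that is at
	 * least minblocks long, rescheduling as needed and stopping
	 * early on a fatal signal or when little free space remains.
	 */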
6369	while (start <= max) {
6370		start = mb_find_next_zero_bit(bitmap, max + 1, start);
6371		if (start > max)
6372			break;
6373		next = mb_find_next_bit(bitmap, max + 1, start);
6374
6375		if ((next - start) >= minblocks) {
6376			int ret = ext4_trim_extent(sb, start, next - start, e4b);
6377
6378			if (ret && ret != -EOPNOTSUPP)
6379				break;
6380			count += next - start;
6381		}
6382		free_count += next - start;
6383		start = next + 1;
6384
6385		if (fatal_signal_pending(current)) {
6386			count = -ERESTARTSYS;
6387			break;
6388		}
6389
6390		if (need_resched()) {
6391			ext4_unlock_group(sb, e4b->bd_group);
6392			cond_resched();
6393			ext4_lock_group(sb, e4b->bd_group);
6394		}
6395
6396		if ((e4b->bd_info->bb_free - free_count) < minblocks)
6397			break;
6398	}
6399
6400	return count;
6401}
6402
6403/**
6404 * ext4_trim_all_free -- function to trim all free space in alloc. group
6405 * @sb:			super block for file system
6406 * @group:		group to be trimmed
6407 * @start:		first group block to examine
6408 * @max:		last group block to examine
6409 * @minblocks:		minimum extent block count
6410 * @set_trimmed:	set the trimmed flag if at least one block is trimmed
6411 *
6412 * ext4_trim_all_free walks through the group's block bitmap searching for free
6413 * extents. When a free extent is found, it is marked as used in the group buddy
6414 * bitmap, a TRIM command is issued on the extent, and the extent is then freed
6415 * again in the group buddy bitmap.
6416 */
6417static ext4_grpblk_t
6418ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6419		   ext4_grpblk_t start, ext4_grpblk_t max,
6420		   ext4_grpblk_t minblocks, bool set_trimmed)
6421{
6422	struct ext4_buddy e4b;
6423	int ret;
6424
6425	trace_ext4_trim_all_free(sb, group, start, max);
6426
6427	ret = ext4_mb_load_buddy(sb, group, &e4b);
6428	if (ret) {
6429		ext4_warning(sb, "Error %d loading buddy information for %u",
6430			     ret, group);
6431		return ret;
6432	}
 
6433
6434	ext4_lock_group(sb, group);
6435
6436	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6437	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6438		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6439		if (ret >= 0 && set_trimmed)
6440			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6441	} else {
6442		ret = 0;
 
6443	}
6444
6445	ext4_unlock_group(sb, group);
6446	ext4_mb_unload_buddy(&e4b);
6447
6448	ext4_debug("trimmed %d blocks in the group %d\n",
6449		ret, group);
6450
6451	return ret;
6452}
6453
6454/**
6455 * ext4_trim_fs() -- trim ioctl handle function
6456 * @sb:			superblock for filesystem
6457 * @range:		fstrim_range structure
6458 *
6459 * start:	first byte to trim
6460 * len:		number of bytes to trim from start
6461 * minlen:	minimum extent length in bytes
6462 * ext4_trim_fs goes through all allocation groups containing bytes from
6463 * start to start+len. For each such group the ext4_trim_all_free function
6464 * is invoked to trim all free space.
6465 */
6466int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6467{
6468	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6469	struct ext4_group_info *grp;
6470	ext4_group_t group, first_group, last_group;
6471	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6472	uint64_t start, end, minlen, trimmed = 0;
 
6473	ext4_fsblk_t first_data_blk =
6474			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6475	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6476	bool whole_group, eof = false;
6477	int ret = 0;
6478
6479	start = range->start >> sb->s_blocksize_bits;
6480	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6481	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6482			      range->minlen >> sb->s_blocksize_bits);
6483
6484	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6485	    start >= max_blks ||
6486	    range->len < sb->s_blocksize)
6487		return -EINVAL;
6488	/* No point in trying to trim less than the discard granularity */
6489	if (range->minlen < discard_granularity) {
6490		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6491				discard_granularity >> sb->s_blocksize_bits);
6492		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6493			goto out;
6494	}
6495	if (end >= max_blks - 1) {
6496		end = max_blks - 1;
6497		eof = true;
6498	}
6499	if (end <= first_data_blk)
6500		goto out;
6501	if (start < first_data_blk)
 
6502		start = first_data_blk;
 
6503
6504	/* Determine first and last group to examine based on start and end */
6505	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6506				     &first_group, &first_cluster);
6507	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6508				     &last_group, &last_cluster);
6509
6510	/* end now represents the last cluster to discard in this group */
6511	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6512	whole_group = true;
 
6513
6514	for (group = first_group; group <= last_group; group++) {
6515		grp = ext4_get_group_info(sb, group);
6516		/* We only do this if the grp has never been initialized */
6517		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6518			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6519			if (ret)
6520				break;
6521		}
6522
6523		/*
6524		 * For all the groups except the last one, last cluster will
6525		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6526		 * change it for the last group, note that last_cluster is
6527		 * already computed earlier by ext4_get_group_no_and_offset()
6528		 */
6529		if (group == last_group) {
6530			end = last_cluster;
6531			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6532		}
6533		if (grp->bb_free >= minlen) {
6534			cnt = ext4_trim_all_free(sb, group, first_cluster,
6535						 end, minlen, whole_group);
6536			if (cnt < 0) {
6537				ret = cnt;
6538				break;
6539			}
6540			trimmed += cnt;
6541		}
6542
6543		/*
6544		 * For every group except the first one, we are sure
6545		 * that the first cluster to discard will be cluster #0.
6546		 */
6547		first_cluster = 0;
6548	}
 
6549
6550	if (!ret)
6551		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6552
6553out:
6554	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6555	return ret;
6556}
6557
6558/* Iterate all the free extents in the group. */
6559int
6560ext4_mballoc_query_range(
6561	struct super_block		*sb,
6562	ext4_group_t			group,
6563	ext4_grpblk_t			start,
6564	ext4_grpblk_t			end,
6565	ext4_mballoc_query_range_fn	formatter,
6566	void				*priv)
6567{
6568	void				*bitmap;
6569	ext4_grpblk_t			next;
6570	struct ext4_buddy		e4b;
6571	int				error;
6572
6573	error = ext4_mb_load_buddy(sb, group, &e4b);
6574	if (error)
6575		return error;
6576	bitmap = e4b.bd_bitmap;
6577
6578	ext4_lock_group(sb, group);
6579
6580	start = (e4b.bd_info->bb_first_free > start) ?
6581		e4b.bd_info->bb_first_free : start;
6582	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6583		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6584
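	/*
	 * For each run of free clusters, drop the group lock and hand
	 * the extent to the caller's formatter callback.
	 */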
6585	while (start <= end) {
6586		start = mb_find_next_zero_bit(bitmap, end + 1, start);
6587		if (start > end)
6588			break;
6589		next = mb_find_next_bit(bitmap, end + 1, start);
6590
6591		ext4_unlock_group(sb, group);
6592		error = formatter(sb, group, start, next - start, priv);
6593		if (error)
6594			goto out_unload;
6595		ext4_lock_group(sb, group);
6596
6597		start = next + 1;
6598	}
6599
6600	ext4_unlock_group(sb, group);
6601out_unload:
6602	ext4_mb_unload_buddy(&e4b);
6603
6604	return error;
6605}