Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * KUnit test of ext4 multiblocks allocation.
  4 */
  5
  6#include <kunit/test.h>
  7#include <kunit/static_stub.h>
  8#include <linux/random.h>
  9
 10#include "ext4.h"
 11
/*
 * Per-group mock context: a fake block-bitmap buffer_head plus a group
 * descriptor, handed out by the static stubs below in place of the real
 * on-disk structures.
 */
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just the place holders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};
 18
/* Test-wide context: one mbt_grp_ctx per block group. */
struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};
 22
/*
 * Mock super block wrapper.  sb->s_fs_info is pointed at @sbi, so
 * MBT_SB() can recover the whole wrapper (and thus mbt_ctx) with
 * container_of().
 */
struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};
 28
/* Map a super_block back to the test wrapper and its per-group state. */
#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
 32
 33static struct inode *mbt_alloc_inode(struct super_block *sb)
 34{
 35	struct ext4_inode_info *ei;
 36
 37	ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
 38	if (!ei)
 39		return NULL;
 40
 41	INIT_LIST_HEAD(&ei->i_orphan);
 42	init_rwsem(&ei->xattr_sem);
 43	init_rwsem(&ei->i_data_sem);
 44	inode_init_once(&ei->vfs_inode);
 45	ext4_fc_init_inode(&ei->vfs_inode);
 46
 47	return &ei->vfs_inode;
 48}
 49
/* ->free_inode counterpart of mbt_alloc_inode(). */
static void mbt_free_inode(struct inode *inode)
{
	kfree(EXT4_I(inode));
}
 54
/* Super ops for the mock fs; only inode alloc/free are needed. */
static const struct super_operations mbt_sops = {
	.alloc_inode	= mbt_alloc_inode,
	.free_inode	= mbt_free_inode,
};
 59
/* ->kill_sb for the mock fs type; no extra state to tear down. */
static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}
 64
/* Dummy fs type so sget() can create a super_block for the tests. */
static struct file_system_type mbt_fs_type = {
	.name			= "mballoc test",
	.kill_sb		= mbt_kill_sb,
};
 69
/*
 * Prepare the pieces ext4_mb_init() needs on the bare mock super_block
 * (fake bdev + queue, inode list, super ops), run ext4_mb_init(), and
 * set up the free/dirty cluster percpu counters.
 *
 * Returns 0 on success or a negative errno, fully unwinding any partial
 * setup on failure.
 */
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
	 * new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	/* seed the free-clusters counter from the mock bitmaps */
	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}
119
/* Undo mbt_mb_init() in reverse order. */
static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}
128
/* No-op "set" callback for sget(); the sb needs no extra setup here. */
static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}
133
/*
 * Build a minimal in-memory super_block wired to an mbt_ext4_super_block.
 * sget() hands back the sb with s_umount write-locked; we release it
 * before returning.  Returns NULL on any failure.
 */
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	/* MBT_SB() recovers fsb from s_fs_info via container_of() */
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}
169
/* Release everything mbt_ext4_alloc_super_block() set up. */
static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}
179
/* One parameterized filesystem geometry for the test suite. */
struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;	/* log2 of block size */
	unsigned int cluster_bits;	/* log2 of blocks per cluster */
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;		/* group descriptor size in bytes */
};
187
/* Fill the sb/sbi/es geometry fields from the current test layout. */
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	/* fls(desc_size) - 1 == log2(desc_size) for power-of-two sizes */
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}
212
/*
 * Allocate a group's fake block bitmap.  Bits past the last real cluster
 * are marked used so allocation can never stray beyond the group, and
 * the descriptor's free-cluster count is set to the full group.
 */
static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	/* mark the bitmap tail beyond the last cluster as in-use */
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}
226
227static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
228{
229	kfree(grp_ctx->bitmap_bh.b_data);
230	grp_ctx->bitmap_bh.b_data = NULL;
231}
232
/* Mark [start, start+len) used in @group's mock bitmap. */
static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}
240
/* Return the raw mock bitmap of @group. */
static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}
247
/* called after mbt_init_sb_layout */
/*
 * Allocate and initialize per-group mock contexts for every group.
 * On failure, groups initialized so far are released.  Cluster 0 of
 * group 0 is pre-marked used (see comment below).
 */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * first data block(first cluster in first group) is used by
	 * metadata, mark it used to avoid to alloc data block at first
	 * block which will fail ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	/* i is the first group that failed; release groups [0, i) */
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}
279
/* Release every group's mock context and the context array itself. */
static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}
289
/*
 * Stub for ext4_read_block_bitmap_nowait(): hand out the group's mock
 * bitmap buffer_head instead of reading from disk.
 */
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}
300
/* Stub for ext4_wait_block_bitmap(): no I/O to wait for, just set flags. */
static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * real ext4_wait_block_bitmap will set these flags and
	 * functions like ext4_mb_init_cache will verify the flags.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}
314
/* Stub for ext4_get_group_desc(): return the group's mock descriptor. */
static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}
326
327static int
328ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
329			  ext4_group_t group, ext4_grpblk_t blkoff,
330			  ext4_grpblk_t len, int flags,
331			  ext4_grpblk_t *ret_changed)
332{
333	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
334	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;
335
336	if (state)
337		mb_set_bits(bitmap_bh->b_data, blkoff, len);
338	else
339		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
340
341	return 0;
342}
343
344#define TEST_GOAL_GROUP 1
/*
 * Per-test setup: build the mock super block for the current layout
 * parameter, install the static stubs (needed before mbt_mb_init, since
 * ext4_mb_init already calls through them), and initialize mballoc.
 */
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub function will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}
387
/* Per-test teardown, reverse order of mbt_kunit_init(). */
static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}
396
397static void test_new_blocks_simple(struct kunit *test)
398{
399	struct super_block *sb = (struct super_block *)test->priv;
400	struct inode *inode;
401	struct ext4_allocation_request ar;
402	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
403	int err = 0;
404	ext4_fsblk_t found;
405	struct ext4_sb_info *sbi = EXT4_SB(sb);
406
407	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
408	if (!inode)
409		return;
410
411	inode->i_sb = sb;
412	ar.inode = inode;
413
414	/* get block at goal */
415	ar.goal = ext4_group_first_block_no(sb, goal_group);
416	found = ext4_mb_new_blocks_simple(&ar, &err);
417	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
418		"failed to alloc block at goal, expected %llu found %llu",
419		ar.goal, found);
420
421	/* get block after goal in goal group */
422	ar.goal = ext4_group_first_block_no(sb, goal_group);
423	found = ext4_mb_new_blocks_simple(&ar, &err);
424	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
425		"failed to alloc block after goal in goal group, expected %llu found %llu",
426		ar.goal + 1, found);
427
428	/* get block after goal group */
429	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
430	ar.goal = ext4_group_first_block_no(sb, goal_group);
431	found = ext4_mb_new_blocks_simple(&ar, &err);
432	KUNIT_ASSERT_EQ_MSG(test,
433		ext4_group_first_block_no(sb, goal_group + 1), found,
434		"failed to alloc block after goal group, expected %llu found %llu",
435		ext4_group_first_block_no(sb, goal_group + 1), found);
436
437	/* get block before goal group */
438	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
439		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
440	ar.goal = ext4_group_first_block_no(sb, goal_group);
441	found = ext4_mb_new_blocks_simple(&ar, &err);
442	KUNIT_ASSERT_EQ_MSG(test,
443		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
444		"failed to alloc block before goal group, expected %llu found %llu",
445		ext4_group_first_block_no(sb, 0 + EXT4_C2B(sbi, 1)), found);
446
447	/* no block available, fail to allocate block */
448	for (i = 0; i < ext4_get_groups_count(sb); i++)
449		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
450	ar.goal = ext4_group_first_block_no(sb, goal_group);
451	found = ext4_mb_new_blocks_simple(&ar, &err);
452	KUNIT_ASSERT_NE_MSG(test, err, 0,
453		"unexpectedly get block when no block is available");
454}
455
/* number of random ranges generated per test pass */
#define TEST_RANGE_COUNT 8

/* one [start, start+len) cluster range inside a group */
struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};
462
463static void
464mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
465			 int count)
466{
467	ext4_grpblk_t start, len, max;
468	int i;
469
470	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
471	for (i = 0; i < count; i++) {
472		start = get_random_u32() % max;
473		len = get_random_u32() % max;
474		len = min(len, max - start);
475
476		ranges[i].start = start + i * max;
477		ranges[i].len = len;
478	}
479}
480
/*
 * Verify that after freeing [start, start+len) in @goal_group, that
 * group has exactly that range free while every other group remains
 * fully used.
 */
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		/* every other group must still be completely used */
		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	/* the goal group's free run must begin at start and end at start+len */
	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}
507
508static void
509test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
510			      ext4_grpblk_t start, ext4_grpblk_t len)
511{
512	struct super_block *sb = (struct super_block *)test->priv;
513	struct ext4_sb_info *sbi = EXT4_SB(sb);
514	struct inode *inode;
515	ext4_fsblk_t block;
516
517	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
518	if (!inode)
519		return;
520	inode->i_sb = sb;
521
522	if (len == 0)
523		return;
524
525	block = ext4_group_first_block_no(sb, goal_group) +
526		EXT4_C2B(sbi, start);
527	ext4_free_blocks_simple(inode, block, len);
528	validate_free_blocks_simple(test, sb, goal_group, start, len);
529	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
530}
531
/*
 * Mark every group fully used, then free and validate a series of
 * random ranges in the goal group.
 */
static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
			ranges[i].start, ranges[i].len);
}
547
/*
 * Mark [start, start+len) used via ext4_mb_mark_diskspace_used() on an
 * all-free bitmap and verify exactly that range ended up set.
 */
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	/* clear the bitmap so only the marked range should be set */
	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* first set bit at start, first clear bit at start+len, nothing after */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}
580
/* Drive test_mark_diskspace_used_range() over random ranges. */
static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	/* minimal allocation context: a found extent in the goal group */
	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}
603
/*
 * Reference implementation: compute the expected buddy bitmap and group
 * info for @bitmap independently of ext4_mb_generate_buddy(), so the two
 * can be compared.  Pass 0 pairs adjacent free clusters into order-1
 * buddies; each later pass folds one buddy level into the next.  A final
 * scan over the cluster bitmap counts free fragments.
 */
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	/* buddy starts all-used; grp counters start at zero */
	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				 bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			/* even-aligned free pair: promote to an order-1 buddy */
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	/* fold each buddy level into the next-higher order */
	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				/* pair merges upward: consume both lower buddies */
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	/* count fragments: maximal runs of consecutive free clusters */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}
670
/*
 * Assert that two group-info structs agree on the fields the buddy
 * generator fills in: first free, fragments, free count, largest free
 * order, and the per-order counters.
 */
static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] diffs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}
694
/*
 * Run both buddy generators over the same bitmap and assert that the
 * produced buddy bitmaps and group infos match.
 */
static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
			   void *mbt_buddy, struct ext4_group_info *mbt_grp,
			   void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}
716
/*
 * Progressively mark random ranges used in a shared bitmap, comparing
 * ext4_mb_generate_buddy() against the reference after each step.
 */
static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}
744
/*
 * Mark one extent used via mb_mark_used(), then mirror the change in a
 * shadow bitmap/group-info and assert the loaded buddy matches.
 */
static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	/* replay the same change on the shadow state */
	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
778
779static void test_mb_mark_used(struct kunit *test)
780{
781	struct ext4_buddy e4b;
782	struct super_block *sb = (struct super_block *)test->priv;
783	void *bitmap, *buddy;
784	struct ext4_group_info *grp;
785	int ret;
786	struct test_range ranges[TEST_RANGE_COUNT];
787	int i;
788
789	/* buddy cache assumes that each page contains at least one block */
790	if (sb->s_blocksize > PAGE_SIZE)
791		kunit_skip(test, "blocksize exceeds pagesize");
792
793	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
794	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
795	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
796	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
797	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
798				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
799
800	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
801	KUNIT_ASSERT_EQ(test, ret, 0);
802
803	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
804	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
805	for (i = 0; i < TEST_RANGE_COUNT; i++)
806		test_mb_mark_used_range(test, &e4b, ranges[i].start,
807					ranges[i].len, bitmap, buddy, grp);
808
809	ext4_mb_unload_buddy(&e4b);
810}
811
/*
 * Free one extent via mb_free_blocks(), then mirror the change in a
 * shadow bitmap/group-info and assert the loaded buddy matches.
 */
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	/* replay the same change on the shadow state */
	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);

}
841
842static void test_mb_free_blocks(struct kunit *test)
843{
844	struct ext4_buddy e4b;
845	struct super_block *sb = (struct super_block *)test->priv;
846	void *bitmap, *buddy;
847	struct ext4_group_info *grp;
848	struct ext4_free_extent ex;
849	int ret;
850	int i;
851	struct test_range ranges[TEST_RANGE_COUNT];
852
853	/* buddy cache assumes that each page contains at least one block */
854	if (sb->s_blocksize > PAGE_SIZE)
855		kunit_skip(test, "blocksize exceeds pagesize");
856
857	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
858	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
859	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
860	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
861	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
862				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
863
864	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
865	KUNIT_ASSERT_EQ(test, ret, 0);
866
867	ex.fe_start = 0;
868	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
869	ex.fe_group = TEST_GOAL_GROUP;
870
871	ext4_lock_group(sb, TEST_GOAL_GROUP);
872	mb_mark_used(&e4b, &ex);
873	ext4_unlock_group(sb, TEST_GOAL_GROUP);
874
875	grp->bb_free = 0;
876	memset(bitmap, 0xff, sb->s_blocksize);
877
878	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
879	for (i = 0; i < TEST_RANGE_COUNT; i++)
880		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
881					  ranges[i].len, bitmap, buddy, grp);
882
883	ext4_mb_unload_buddy(&e4b);
884}
885
/* iterations for the rough mb_mark_used() cost estimate below */
#define COUNT_FOR_ESTIMATE 100000
/*
 * Rough performance probe: repeatedly mark and then free random ranges,
 * accumulating the jiffies spent inside the mark phase only.  Marked
 * KUNIT_SPEED_SLOW in the suite's case table.
 */
static void test_mb_mark_used_cost(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i, j;
	unsigned long start, end, all = 0;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_group = TEST_GOAL_GROUP;
	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
		start = jiffies;
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ex.fe_start = ranges[i].start;
			ex.fe_len = ranges[i].len;
			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_mark_used(&e4b, &ex);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
		end = jiffies;
		all += (end - start);

		/* undo the marks so the next round starts from a clean group */
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_free_blocks(NULL, &e4b, ranges[i].start,
				       ranges[i].len);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
	}

	kunit_info(test, "costed jiffies %lu\n", all);
	ext4_mb_unload_buddy(&e4b);
}
935
/* Layouts the suite is parameterized over: 1K, 4K and 64K block sizes. */
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};
959
/* Parameter-description callback for KUNIT_ARRAY_PARAM below. */
static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
969KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
970
/* Every case runs once per layout; the cost probe is flagged slow. */
static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
			      { .speed = KUNIT_SPEED_SLOW }),
	{}
};
982
/* Suite definition: per-test init/exit build and tear down the mock fs. */
static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};
989
990kunit_test_suites(&mbt_test_suite);
991
992MODULE_LICENSE("GPL");
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * KUnit test of ext4 multiblocks allocation.
  4 */
  5
  6#include <kunit/test.h>
  7#include <kunit/static_stub.h>
  8#include <linux/random.h>
  9
 10#include "ext4.h"
 11
/* Per-group mock context: fake bitmap bh plus group descriptor. */
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just the place holders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};
 18
/* Test-wide context: one mbt_grp_ctx per block group. */
struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};
 22
/* Mock super block wrapper; MBT_SB() recovers it from s_fs_info. */
struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};
 28
/* Map a super_block back to the test wrapper and its per-group state. */
#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
 32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 33static const struct super_operations mbt_sops = {
 
 
 34};
 35
/* ->kill_sb for the mock fs type; no extra state to tear down. */
static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}
 40
/* Dummy fs type so sget() can create a super_block for the tests. */
static struct file_system_type mbt_fs_type = {
	.name			= "mballoc test",
	.kill_sb		= mbt_kill_sb,
};
 45
/*
 * Prepare what ext4_mb_init() needs on the bare mock super_block, run
 * it, and set up the cluster percpu counters.  Returns 0 or a negative
 * errno, unwinding partial setup on failure.
 */
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
	 * new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	/* seed the free-clusters counter from the mock bitmaps */
	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}
 95
/* Undo mbt_mb_init() in reverse order. */
static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}
104
/* sget() "set" callback: nothing to initialize, always accept the sb. */
static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}
109
/*
 * Build a fake ext4 superblock.  The struct super_block itself comes from
 * sget() so VFS teardown paths work; the ext4-specific state lives in a
 * separately allocated mbt_ext4_super_block wired up via sb->s_fs_info.
 * Returns the superblock or NULL on failure.
 */
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sb->s_fs_info = sbi;

	/* sget() returns with s_umount held for writing; drop it */
	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}
145
/* Release everything mbt_ext4_alloc_super_block() set up. */
static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}
155
/* Parameterized filesystem geometry used to drive each test case. */
struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;	/* log2 of the block size */
	unsigned int cluster_bits;	/* log2 of blocks per cluster */
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;		/* group descriptor size in bytes */
};
163
164static void mbt_init_sb_layout(struct super_block *sb,
165			       struct mbt_ext4_block_layout *layout)
166{
167	struct ext4_sb_info *sbi = EXT4_SB(sb);
168	struct ext4_super_block *es = sbi->s_es;
169
170	sb->s_blocksize = 1UL << layout->blocksize_bits;
171	sb->s_blocksize_bits = layout->blocksize_bits;
172
173	sbi->s_groups_count = layout->group_count;
174	sbi->s_blocks_per_group = layout->blocks_per_group;
175	sbi->s_cluster_bits = layout->cluster_bits;
176	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
177	sbi->s_clusters_per_group = layout->blocks_per_group >>
178				    layout->cluster_bits;
179	sbi->s_desc_size = layout->desc_size;
180	sbi->s_desc_per_block_bits =
181		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
182	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;
183
184	es->s_first_data_block = cpu_to_le32(0);
185	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
186					    layout->group_count);
187}
188
189static int mbt_grp_ctx_init(struct super_block *sb,
190			    struct mbt_grp_ctx *grp_ctx)
191{
192	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
193
194	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
195	if (grp_ctx->bitmap_bh.b_data == NULL)
196		return -ENOMEM;
197	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
198	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);
199
200	return 0;
201}
202
/* Free a group's fake bitmap; NULLing b_data makes a second call harmless. */
static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}
208
/* Mark @len clusters starting at @start as in use in @group's fake bitmap. */
static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

/* Return the raw fake block bitmap of @group. */
static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}
223
/*
 * Allocate and initialize the per-group contexts for every block group.
 * Called after mbt_init_sb_layout(), which fixes the group count and sizes.
 * Returns 0 or -ENOMEM; on failure, already-initialized groups are undone.
 */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * first data block(first cluster in first group) is used by
	 * metadata, mark it used to avoid to alloc data block at first
	 * block which will fail ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	/* unwind only the groups that were fully initialized (indices < i) */
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}
255
256static void mbt_ctx_release(struct super_block *sb)
257{
258	struct mbt_ctx *ctx = MBT_CTX(sb);
259	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
260
261	for (i = 0; i < ngroups; i++)
262		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
263	kfree(ctx->grp_ctx);
264}
265
/*
 * Stub for ext4_read_block_bitmap_nowait(): hand back the in-memory fake
 * bitmap buffer head instead of doing real block I/O.
 */
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}
276
/* Stub for ext4_wait_block_bitmap(): no I/O in flight, just set the flags. */
static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * real ext4_wait_block_bitmap will set these flags and
	 * functions like ext4_mb_init_cache will verify the flags.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}
290
291static struct ext4_group_desc *
292ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
293			 struct buffer_head **bh)
294{
295	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
296
297	if (bh != NULL)
298		*bh = &grp_ctx->gd_bh;
299
300	return &grp_ctx->desc;
301}
302
303static int
304ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
305			  ext4_group_t group, ext4_grpblk_t blkoff,
306			  ext4_grpblk_t len, int flags,
307			  ext4_grpblk_t *ret_changed)
308{
309	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
310	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;
311
312	if (state)
313		mb_set_bits(bitmap_bh->b_data, blkoff, len);
314	else
315		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
316
317	return 0;
318}
319
#define TEST_GOAL_GROUP 1
/*
 * Per-test setup: build the fake superblock and group contexts for the
 * current layout parameter, install the static stubs, then bring up the
 * mballoc machinery.  The superblock is stored in test->priv for the test
 * body and mbt_kunit_exit().
 */
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	/* stubs must be active before mbt_mb_init(); see comment below */
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub function will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}
363
/* Per-test teardown: mirror mbt_kunit_init() in reverse order. */
static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}
372
373static void test_new_blocks_simple(struct kunit *test)
374{
375	struct super_block *sb = (struct super_block *)test->priv;
376	struct inode *inode;
377	struct ext4_allocation_request ar;
378	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
379	int err = 0;
380	ext4_fsblk_t found;
381	struct ext4_sb_info *sbi = EXT4_SB(sb);
382
383	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
384	if (!inode)
385		return;
386
387	inode->i_sb = sb;
388	ar.inode = inode;
389
390	/* get block at goal */
391	ar.goal = ext4_group_first_block_no(sb, goal_group);
392	found = ext4_mb_new_blocks_simple(&ar, &err);
393	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
394		"failed to alloc block at goal, expected %llu found %llu",
395		ar.goal, found);
396
397	/* get block after goal in goal group */
398	ar.goal = ext4_group_first_block_no(sb, goal_group);
399	found = ext4_mb_new_blocks_simple(&ar, &err);
400	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
401		"failed to alloc block after goal in goal group, expected %llu found %llu",
402		ar.goal + 1, found);
403
404	/* get block after goal group */
405	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
406	ar.goal = ext4_group_first_block_no(sb, goal_group);
407	found = ext4_mb_new_blocks_simple(&ar, &err);
408	KUNIT_ASSERT_EQ_MSG(test,
409		ext4_group_first_block_no(sb, goal_group + 1), found,
410		"failed to alloc block after goal group, expected %llu found %llu",
411		ext4_group_first_block_no(sb, goal_group + 1), found);
412
413	/* get block before goal group */
414	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
415		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
416	ar.goal = ext4_group_first_block_no(sb, goal_group);
417	found = ext4_mb_new_blocks_simple(&ar, &err);
418	KUNIT_ASSERT_EQ_MSG(test,
419		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
420		"failed to alloc block before goal group, expected %llu found %llu",
421		ext4_group_first_block_no(sb, 0 + EXT4_C2B(sbi, 1)), found);
422
423	/* no block available, fail to allocate block */
424	for (i = 0; i < ext4_get_groups_count(sb); i++)
425		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
426	ar.goal = ext4_group_first_block_no(sb, goal_group);
427	found = ext4_mb_new_blocks_simple(&ar, &err);
428	KUNIT_ASSERT_NE_MSG(test, err, 0,
429		"unexpectedly get block when no block is available");
430}
431
#define TEST_RANGE_COUNT 8

/* A random cluster range [start, start + len) within one group. */
struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};
438
439static void
440mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
441			 int count)
442{
443	ext4_grpblk_t start, len, max;
444	int i;
445
446	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
447	for (i = 0; i < count; i++) {
448		start = get_random_u32() % max;
449		len = get_random_u32() % max;
450		len = min(len, max - start);
451
452		ranges[i].start = start + i * max;
453		ranges[i].len = len;
454	}
455}
456
/*
 * Check that after freeing [start, start + len) in @goal_group, that range
 * is the only free range anywhere in the filesystem.
 */
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	/* every other group must still be completely full */
	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	/* the goal group's free run starts exactly at @start ... */
	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	/* ... and ends exactly at @start + @len */
	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}
483
484static void
485test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
486			      ext4_grpblk_t start, ext4_grpblk_t len)
487{
488	struct super_block *sb = (struct super_block *)test->priv;
489	struct ext4_sb_info *sbi = EXT4_SB(sb);
490	struct inode *inode;
491	ext4_fsblk_t block;
492
493	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
494	if (!inode)
495		return;
496	inode->i_sb = sb;
497
498	if (len == 0)
499		return;
500
501	block = ext4_group_first_block_no(sb, goal_group) +
502		EXT4_C2B(sbi, start);
503	ext4_free_blocks_simple(inode, block, len);
504	validate_free_blocks_simple(test, sb, goal_group, start, len);
505	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
506}
507
508static void test_free_blocks_simple(struct kunit *test)
509{
510	struct super_block *sb = (struct super_block *)test->priv;
511	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
512	ext4_group_t i;
513	struct test_range ranges[TEST_RANGE_COUNT];
514
515	for (i = 0; i < ext4_get_groups_count(sb); i++)
516		mbt_ctx_mark_used(sb, i, 0, max);
517
518	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
519	for (i = 0; i < TEST_RANGE_COUNT; i++)
520		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
521			ranges[i].start, ranges[i].len);
522}
523
/*
 * Record [start, start + len) as the found extent in @ac, run
 * ext4_mb_mark_diskspace_used() against a cleared bitmap and verify that
 * exactly that range ends up set.
 */
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* the bitmap must contain exactly one set run: [start, start + len) */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}
556
/* Drive test_mark_diskspace_used_range() over random extents. */
static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	/*
	 * NOTE(review): only these three ac fields are initialized; this
	 * relies on ext4_mb_mark_diskspace_used() not reading the rest of
	 * the stack-allocated context — confirm against fs/ext4/mballoc.c.
	 */
	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}
579
/*
 * Independently compute the buddy bitmap and group statistics for @bitmap,
 * as a reference to compare against ext4_mb_generate_buddy()'s output.
 * In the buddy bitmap a clear bit means a free chunk of that order.
 */
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	/* start with everything "in use"; bits are cleared as chunks appear */
	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				 bb_counters[MB_NUM_ORDERS(sb)]));

	/* order-0 pass: walk free bits, pairing even/odd neighbours to order 1 */
	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		/* an aligned free pair counts as one order-1 chunk instead */
		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	/* higher orders: repeatedly merge aligned buddy pairs upwards */
	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				/* merged pair is re-marked used at this order */
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	/* count fragments: one per maximal run of free bits in the bitmap */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}
646
/*
 * Assert that two independently computed ext4_group_info copies agree on
 * the statistics that ext4_mb_generate_buddy() fills in.
 */
static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	/*
	 * per-order counters are compared for orders 1..MB_NUM_ORDERS-1;
	 * NOTE(review): bb_counters[0] is skipped here — confirm intentional.
	 */
	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] diffs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}
670
/*
 * Run the reference generator and ext4_mb_generate_buddy() on the same
 * bitmap and assert their buddy bitmaps and statistics are identical.
 */
static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
			   void *mbt_buddy, struct ext4_group_info *mbt_grp,
			   void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}
692
/*
 * Compare ext4_mb_generate_buddy() against the reference implementation
 * over bitmaps that grow progressively more fragmented each iteration.
 */
static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	/* each iteration sets one more range used, cumulatively */
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}
720
/*
 * mb_mark_used() [start, start + len) in the loaded buddy, then rebuild a
 * reference buddy from the shadow bitmap and check both the buddy bitmap
 * and the group statistics match.
 */
static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	/* mirror the change in the shadow bitmap and regenerate a reference */
	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
754
755static void test_mb_mark_used(struct kunit *test)
756{
757	struct ext4_buddy e4b;
758	struct super_block *sb = (struct super_block *)test->priv;
759	void *bitmap, *buddy;
760	struct ext4_group_info *grp;
761	int ret;
762	struct test_range ranges[TEST_RANGE_COUNT];
763	int i;
764
765	/* buddy cache assumes that each page contains at least one block */
766	if (sb->s_blocksize > PAGE_SIZE)
767		kunit_skip(test, "blocksize exceeds pagesize");
768
769	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
770	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
771	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
772	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
773	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
774				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
775
776	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
777	KUNIT_ASSERT_EQ(test, ret, 0);
778
779	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
780	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
781	for (i = 0; i < TEST_RANGE_COUNT; i++)
782		test_mb_mark_used_range(test, &e4b, ranges[i].start,
783					ranges[i].len, bitmap, buddy, grp);
784
785	ext4_mb_unload_buddy(&e4b);
786}
787
/*
 * mb_free_blocks() [start, start + len) in the loaded buddy, then rebuild
 * a reference buddy from the shadow bitmap and check both the buddy bitmap
 * and the group statistics match.
 */
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	/* mirror the change in the shadow bitmap and regenerate a reference */
	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
817
818static void test_mb_free_blocks(struct kunit *test)
819{
820	struct ext4_buddy e4b;
821	struct super_block *sb = (struct super_block *)test->priv;
822	void *bitmap, *buddy;
823	struct ext4_group_info *grp;
824	struct ext4_free_extent ex;
825	int ret;
826	int i;
827	struct test_range ranges[TEST_RANGE_COUNT];
828
829	/* buddy cache assumes that each page contains at least one block */
830	if (sb->s_blocksize > PAGE_SIZE)
831		kunit_skip(test, "blocksize exceeds pagesize");
832
833	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
834	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
835	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
836	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
837	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
838				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
839
840	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
841	KUNIT_ASSERT_EQ(test, ret, 0);
842
843	ex.fe_start = 0;
844	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
845	ex.fe_group = TEST_GOAL_GROUP;
846
847	ext4_lock_group(sb, TEST_GOAL_GROUP);
848	mb_mark_used(&e4b, &ex);
849	ext4_unlock_group(sb, TEST_GOAL_GROUP);
850
851	grp->bb_free = 0;
852	memset(bitmap, 0xff, sb->s_blocksize);
853
854	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
855	for (i = 0; i < TEST_RANGE_COUNT; i++)
856		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
857					  ranges[i].len, bitmap, buddy, grp);
858
859	ext4_mb_unload_buddy(&e4b);
860}
861
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Geometries the suite is parameterized over: 1k, 4k and 64k block sizes,
 * each with 2^3 blocks per cluster, 8192 blocks per group and 4 groups.
 */
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};
885
/* Pretty-print a layout for KUnit's parameterized test description. */
static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
896
/* Each case runs once per entry in mbt_test_layouts. */
static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	{}
};
906
/* Suite definition: init/exit run around every (case, layout) combination. */
static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");