v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
#include "../block-group.h"

static struct vfsmount *test_mnt = NULL;

const char *test_error[] = {
	[TEST_ALLOC_FS_INFO]	     = "cannot allocate fs_info",
	[TEST_ALLOC_ROOT]	     = "cannot allocate root",
	[TEST_ALLOC_EXTENT_BUFFER]   = "cannot allocate extent buffer",
	[TEST_ALLOC_PATH]	     = "cannot allocate path",
	[TEST_ALLOC_INODE]	     = "cannot allocate inode",
	[TEST_ALLOC_BLOCK_GROUP]     = "cannot allocate block group",
	[TEST_ALLOC_EXTENT_MAP]      = "cannot allocate extent map",
};

static const struct super_operations btrfs_test_super_ops = {
	.alloc_inode	= btrfs_alloc_inode,
	.destroy_inode	= btrfs_test_destroy_inode,
};

static int btrfs_test_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &btrfs_test_super_ops;
	return 0;
}

static struct file_system_type test_type = {
	.name		= "btrfs_test_fs",
	.init_fs_context = btrfs_test_init_fs_context,
	.kill_sb	= kill_anon_super,
};

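/*
 * Allocate a bare in-memory inode on the dummy test mount, initialized
 * as a regular file for the selftests to use.
 */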
struct inode *btrfs_new_test_inode(void)
{
	struct inode *inode;

	inode = new_inode(test_mnt->mnt_sb);
	if (inode)
		inode_init_owner(inode, NULL, S_IFREG);

	return inode;
}

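/*
 * Register and mount the pseudo filesystem that provides the superblock
 * backing the dummy inodes and fs_info used by the selftests.
 */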
static int btrfs_init_test_fs(void)
{
	int ret;

	ret = register_filesystem(&test_type);
	if (ret) {
		printk(KERN_ERR "btrfs: cannot register test file system\n");
		return ret;
	}

	test_mnt = kern_mount(&test_type);
	if (IS_ERR(test_mnt)) {
		printk(KERN_ERR "btrfs: cannot mount test file system\n");
		unregister_filesystem(&test_type);
		return PTR_ERR(test_mnt);
	}
	return 0;
}

static void btrfs_destroy_test_fs(void)
{
	kern_unmount(test_mnt);
	unregister_filesystem(&test_type);
}

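/*
 * Build a minimal, device-less fs_info: only the locks, lists and radix
 * trees the selftests touch are initialized, and the dummy flag is set
 * so teardown code knows no real disk state exists.
 */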
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
						GFP_KERNEL);

	if (!fs_info)
		return fs_info;
	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
				      GFP_KERNEL);
	if (!fs_info->fs_devices) {
		kfree(fs_info);
		return NULL;
	}
	fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
				      GFP_KERNEL);
	if (!fs_info->super_copy) {
		kfree(fs_info->fs_devices);
		kfree(fs_info);
		return NULL;
	}

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;

	if (init_srcu_struct(&fs_info->subvol_srcu)) {
		kfree(fs_info->fs_devices);
		kfree(fs_info->super_copy);
		kfree(fs_info);
		return NULL;
	}

	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->qgroup_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	mutex_init(&fs_info->qgroup_rescan_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	fs_info->running_transaction = NULL;
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_ulist = NULL;
	atomic64_set(&fs_info->tree_mod_seq, 0);
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
			    IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
			    IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	test_mnt->mnt_sb->s_fs_info = fs_info;

	return fs_info;
}

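/*
 * Tear down a dummy fs_info: drop any extent buffers still sitting in
 * the buffer radix tree, then free the qgroup config, the fs roots and
 * the fs_info itself.
 */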
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
	struct radix_tree_iter iter;
	void **slot;

	if (!fs_info)
		return;

	if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return;

	test_mnt->mnt_sb->s_fs_info = NULL;

	spin_lock(&fs_info->buffer_lock);
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
		struct extent_buffer *eb;

		eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
		if (!eb)
			continue;
		/* Shouldn't happen but that kind of thinking creates CVEs */
		if (radix_tree_exception(eb)) {
			if (radix_tree_deref_retry(eb))
				slot = radix_tree_iter_retry(&iter);
			continue;
		}
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock(&fs_info->buffer_lock);
		free_extent_buffer_stale(eb);
		spin_lock(&fs_info->buffer_lock);
	}
	spin_unlock(&fs_info->buffer_lock);

	btrfs_free_qgroup_config(fs_info);
	btrfs_free_fs_roots(fs_info);
	cleanup_srcu_struct(&fs_info->subvol_srcu);
	kfree(fs_info->super_copy);
	kfree(fs_info->fs_devices);
	kfree(fs_info);
}

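/*
 * Free a root allocated directly by a test.  Roots that were inserted
 * into the fs_roots radix tree are owned by btrfs_free_fs_roots() and
 * are skipped here.
 */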
void btrfs_free_dummy_root(struct btrfs_root *root)
{
	if (!root)
		return;
	/* Will be freed by btrfs_free_fs_roots */
	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
		return;
	if (root->node) {
		/* One for allocate_extent_buffer */
		free_extent_buffer(root->node);
	}
	kfree(root);
}

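/*
 * Allocate a stand-alone block group of the given length with its own
 * free space ctl, detached from any extent tree or device.
 */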
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_KERNEL);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = length;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->full_stripe_len = fs_info->sectorsize;
	cache->fs_info = fs_info;

	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	btrfs_init_free_space_ctl(cache);
	mutex_init(&cache->free_space_lock);

	return cache;
}

void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}

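/*
 * Set up a zeroed transaction handle marked __TRANS_DUMMY so code
 * elsewhere can recognize it as a selftest transaction.
 */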
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	memset(trans, 0, sizeof(*trans));
	trans->transid = 1;
	trans->type = __TRANS_DUMMY;
	trans->fs_info = fs_info;
}

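/*
 * Entry point for the selftests: mount the test fs, then run every test
 * suite for each sectorsize/nodesize combination that is exercised.
 */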
int btrfs_run_sanity_tests(void)
{
	int ret, i;
	u32 sectorsize, nodesize;
	u32 test_sectorsize[] = {
		PAGE_SIZE,
	};
	ret = btrfs_init_test_fs();
	if (ret)
		return ret;
	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
		sectorsize = test_sectorsize[i];
		for (nodesize = sectorsize;
		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
		     nodesize <<= 1) {
			pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
				sectorsize, nodesize);
			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_buffer_operations(sectorsize,
				nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_io(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_inodes(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_qgroups(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
	ret = btrfs_test_extent_map();

out:
	btrfs_destroy_test_fs();
	return ret;
}

v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"

static struct vfsmount *test_mnt = NULL;

static const struct super_operations btrfs_test_super_ops = {
	.alloc_inode	= btrfs_alloc_inode,
	.destroy_inode	= btrfs_test_destroy_inode,
};

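/*
 * v4.17 still uses the legacy .mount callback with mount_pseudo();
 * the v5.4 version above was converted to the fs_context API.
 */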
static struct dentry *btrfs_test_mount(struct file_system_type *fs_type,
				       int flags, const char *dev_name,
				       void *data)
{
	return mount_pseudo(fs_type, "btrfs_test:", &btrfs_test_super_ops,
			    NULL, BTRFS_TEST_MAGIC);
}

static struct file_system_type test_type = {
	.name		= "btrfs_test_fs",
	.mount		= btrfs_test_mount,
	.kill_sb	= kill_anon_super,
};

struct inode *btrfs_new_test_inode(void)
{
	return new_inode(test_mnt->mnt_sb);
}

static int btrfs_init_test_fs(void)
{
	int ret;

	ret = register_filesystem(&test_type);
	if (ret) {
		printk(KERN_ERR "btrfs: cannot register test file system\n");
		return ret;
	}

	test_mnt = kern_mount(&test_type);
	if (IS_ERR(test_mnt)) {
		printk(KERN_ERR "btrfs: cannot mount test file system\n");
		unregister_filesystem(&test_type);
		return PTR_ERR(test_mnt);
	}
	return 0;
}

static void btrfs_destroy_test_fs(void)
{
	kern_unmount(test_mnt);
	unregister_filesystem(&test_type);
}

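/*
 * Same idea as the v5.4 version above, but using the older two-argument
 * extent_io_tree_init() and a qgroup_op_lock that the v5.4 version no
 * longer initializes.
 */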
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
						GFP_KERNEL);

	if (!fs_info)
		return fs_info;
	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
				      GFP_KERNEL);
	if (!fs_info->fs_devices) {
		kfree(fs_info);
		return NULL;
	}
	fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
				      GFP_KERNEL);
	if (!fs_info->super_copy) {
		kfree(fs_info->fs_devices);
		kfree(fs_info);
		return NULL;
	}

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;

	if (init_srcu_struct(&fs_info->subvol_srcu)) {
		kfree(fs_info->fs_devices);
		kfree(fs_info->super_copy);
		kfree(fs_info);
		return NULL;
	}

	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->qgroup_lock);
	spin_lock_init(&fs_info->qgroup_op_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	mutex_init(&fs_info->qgroup_rescan_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	fs_info->running_transaction = NULL;
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_ulist = NULL;
	atomic64_set(&fs_info->tree_mod_seq, 0);
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	test_mnt->mnt_sb->s_fs_info = fs_info;

	return fs_info;
}

void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
	struct radix_tree_iter iter;
	void **slot;

	if (!fs_info)
		return;

	if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return;

	test_mnt->mnt_sb->s_fs_info = NULL;

	spin_lock(&fs_info->buffer_lock);
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
		struct extent_buffer *eb;

		eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
		if (!eb)
			continue;
		/* Shouldn't happen but that kind of thinking creates CVEs */
		if (radix_tree_exception(eb)) {
			if (radix_tree_deref_retry(eb))
				slot = radix_tree_iter_retry(&iter);
			continue;
		}
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock(&fs_info->buffer_lock);
		free_extent_buffer_stale(eb);
		spin_lock(&fs_info->buffer_lock);
	}
	spin_unlock(&fs_info->buffer_lock);

	btrfs_free_qgroup_config(fs_info);
	btrfs_free_fs_roots(fs_info);
	cleanup_srcu_struct(&fs_info->subvol_srcu);
	kfree(fs_info->super_copy);
	kfree(fs_info->fs_devices);
	kfree(fs_info);
}

void btrfs_free_dummy_root(struct btrfs_root *root)
{
	if (!root)
		return;
	/* Will be freed by btrfs_free_fs_roots */
	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
		return;
	if (root->node)
		free_extent_buffer(root->node);
	kfree(root);
}

struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_KERNEL);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = length;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->full_stripe_len = fs_info->sectorsize;
	cache->fs_info = fs_info;

	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	btrfs_init_free_space_ctl(cache);
	mutex_init(&cache->free_space_lock);

	return cache;
}

void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}

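/*
 * Note the v4.17 variant takes no fs_info argument; the handle is simply
 * zeroed and marked __TRANS_DUMMY.
 */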
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
{
	memset(trans, 0, sizeof(*trans));
	trans->transid = 1;
	trans->type = __TRANS_DUMMY;
}

int btrfs_run_sanity_tests(void)
{
	int ret, i;
	u32 sectorsize, nodesize;
	u32 test_sectorsize[] = {
		PAGE_SIZE,
	};
	ret = btrfs_init_test_fs();
	if (ret)
		return ret;
	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
		sectorsize = test_sectorsize[i];
		for (nodesize = sectorsize;
		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
		     nodesize <<= 1) {
			pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
				sectorsize, nodesize);
			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_buffer_operations(sectorsize,
				nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_io(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_inodes(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_qgroups(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
	ret = btrfs_test_extent_map();

out:
	btrfs_destroy_test_fs();
	return ret;
}