// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "disk-io.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 *   Think of block_rsv's as buckets for logically grouped metadata
 *   reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 *   how large we want our block rsv to be, and ->reserved is how much space
 *   is currently reserved for this block reserve.
 *
 *   ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *     Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *     We call into btrfs_reserve_metadata_bytes() with our bytes, which are
 *     accounted for in space_info->bytes_may_use, and then add the bytes to
 *     ->reserved, and to ->size in the case of btrfs_block_rsv_add.
 *
 *     ->size is an over-estimation of how much we may use for a particular
 *     operation.
 *
 *   -> Use
 *     Entrance: btrfs_use_block_rsv
 *
 *     When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *     to determine the appropriate block_rsv to use, and then verify that
 *     ->reserved has enough space for our tree block allocation.  Once
 *     successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *     Entrance: btrfs_block_rsv_release
 *
 *     When we are finished with our operation we subtract our individual
 *     reservation from ->size, and then subtract ->size from ->reserved and
 *     free up the excess if there is any.
 *
 *     There is some logic here to refill the delayed refs rsv or the global
 *     rsv as needed, otherwise the excess is subtracted from
 *     space_info->bytes_may_use.
 *
 *   (An illustrative sketch putting these three steps together follows this
 *   comment.)
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of the
 *   lifetime of their particular operation (the whole trans handle lifetime in
 *   the case of the transaction reserve, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be required
 *   to make our extent tree updates.  This block reserve acts as an overflow
 *   buffer in case our delayed refs reserve does not reserve enough space to
 *   update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code.  This is pretty
 *   straightforward, it's just that the calculation of ->size encodes a lot of
 *   different items, and thus it gets used when updating inodes, inserting file
 *   extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation.  We use the transaction items and pre-reserve space for every
 *   operation, and use this reservation to refill any gap between ->size and
 *   ->reserved that may exist.
 *
 *   From there it's straightforward: removing a delayed ref means we remove its
 *   count from ->size and free up reservations as necessary.  Since this is
 *   the most dynamic block reserve in the system, we will try to refill this
 *   block reserve first with any excess returned by any other block reserve.
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve used when we try to reserve space and
 *   don't have a specific bucket for the allocation.  It is mostly used for
 *   updating the device tree and such, since that is a separate pool we're
 *   content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput.  We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left.  With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and try
 *   to make a new reservation.  This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off and
 *   re-reserve.
 */
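
/*
 * Illustrative sketch of the lifecycle described above (added for clarity,
 * not part of the original file; error handling is abbreviated and the
 * caller, byte count and flush mode are assumptions).  A typical temporary
 * reserve might be driven roughly like this:
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *
 *	Reserve: charge space_info->bytes_may_use, bump ->size and ->reserved.
 *	ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *
 *	Use: tree block allocations go through btrfs_use_block_rsv(), which
 *	subtracts fs_info->nodesize from ->reserved on success.
 *	trans->block_rsv = rsv;
 *	... do the metadata modifications ...
 *
 *	Finish: return whatever is left (the excess may first refill the
 *	delayed refs or global reserves), then free the reserve.
 *	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
 *	btrfs_free_block_rsv(fs_info, rsv);
 */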

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes,
				    u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}

u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are the delayed ops block reserve then push to the global
	 * rsv, otherwise dump into the delayed refs reserve if it is not
	 * full.
	 */
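	/*
	 * Illustrative example (added for clarity; the figures are
	 * assumptions): fully releasing a 1M transaction reserve while the
	 * delayed refs reserve is 512K short of its ->size hands 512K to the
	 * delayed refs reserve and returns the remaining 512K to
	 * space_info->bytes_may_use in block_rsv_release_bytes().
	 */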
	if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * At a minimum we are also going to need to modify the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
		min_items++;
	}

	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
		min_items++;
	}

	/*
	 * But we also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * We also need space for the delayed ref updates from the unlink, so
	 * add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
	 * each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
					       BTRFS_UNLINK_METADATA_UNITS));
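
	/*
	 * Illustrative arithmetic (added for clarity; the numbers are
	 * assumptions, not from the original source): with a 16K nodesize and
	 * BTRFS_MAX_LEVEL of 8, btrfs_calc_insert_metadata_size() charges
	 * 2 * 16K * 8 = 256K per item, so the floor computed above is roughly
	 * 256K * min_items plus the delayed ref portion, while the SZ_512M
	 * clamp below keeps the global reserve bounded on large filesystems.
	 */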

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (root->root_key.objectid) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums &&
	     root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

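/*
 * Summary of the fallback order below (comment added for clarity, not part of
 * the original file): try the reserve chosen by get_block_rsv(); if that is
 * the global reserve, recompute it once and retry; then reserve directly from
 * the space_info with BTRFS_RESERVE_NO_FLUSH; then steal from the global
 * reserve if it shares the same space_info; and finally attempt a
 * BTRFS_RESERVE_FLUSH_EMERGENCY reservation before returning an error.
 */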
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   blocksize, BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes, try to use some from the
	 * global reserve, provided our block reserve shares the global
	 * reserve's space info.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}