v6.13.7
// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 *   Think of block_rsv's as buckets for logically grouped metadata
 *   reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 *   how large we want our block rsv to be, ->reserved is how much space is
 *   currently reserved for this block reserve.
 *
 *   ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *     Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *     We call into btrfs_reserve_metadata_bytes() with our bytes, which is
 *     accounted for in space_info->bytes_may_use, and then add the bytes to
 *     ->reserved, and ->size in the case of btrfs_block_rsv_add.
 *
 *     ->size is an over-estimation of how much we may use for a particular
 *     operation.
 *
 *   -> Use
 *     Entrance: btrfs_use_block_rsv
 *
 *     When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *     to determine the appropriate block_rsv to use, and then verify that
 *     ->reserved has enough space for our tree block allocation.  Once
 *     successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *     Entrance: btrfs_block_rsv_release
 *
 *     We are finished with our operation, subtract our individual reservation
 *     from ->size, and then subtract ->size from ->reserved and free up the
 *     excess if there is any.
 *
 *     There is some logic here to refill the delayed refs rsv or the global rsv
 *     as needed, otherwise the excess is subtracted from
 *     space_info->bytes_may_use.
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of the
 *   lifetime of their particular operation (transaction for the whole trans
 *   handle lifetime, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be required
 *   to make our extent tree updates.  This block reserve acts as an overflow
 *   buffer in case our delayed refs reserve does not reserve enough space to
 *   update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code.  This is pretty
 *   straightforward, it's just that the calculation of ->size encodes a lot of
 *   different items, and thus it gets used when updating inodes, inserting file
 *   extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation.  We use the transaction items and pre-reserve space for every
 *   operation, and use this reservation to refill any gap between ->size and
 *   ->reserved that may exist.
 *
 *   From there it's straightforward, removing a delayed ref means we remove its
 *   count from ->size and free up reservations as necessary.  Since this is
 *   the most dynamic block reserve in the system, we will try to refill this
 *   block reserve first with any excess returned by any other block reserve.
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve to make us try to reserve space if we
 *   don't have a specific bucket for this allocation.  It is mostly used for
 *   updating the device tree and such, since that is a separate pool we're
 *   content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput.  We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left.  With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and try
 *   to make a new reservation.  This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off and
 *   re-reserve.
 */
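
/*
 * Illustrative sketch (not part of the upstream file): a minimal caller that
 * walks the Reserve -> Use -> Finish steps described above with a private
 * BLOCK_RSV_TEMP reserve.  The tree block allocation itself is elided; in
 * real code btrfs_alloc_tree_block() calls btrfs_use_block_rsv() on our
 * behalf.  Only the functions defined in this file are used.
 */
static int __maybe_unused example_block_rsv_lifecycle(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	/* Reserve: account one tree block in ->reserved and ->size. */
	ret = btrfs_block_rsv_add(fs_info, rsv, fs_info->nodesize,
				  BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		goto out;

	/*
	 * Use: consume nodesize from ->reserved, as btrfs_use_block_rsv()
	 * would when a tree block is allocated against this reserve.
	 */
	ret = btrfs_block_rsv_use_bytes(rsv, fs_info->nodesize);

	/* ... do the metadata work covered by this reservation ... */

out:
	/* Finish: drop ->size and return any excess ->reserved space. */
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	btrfs_free_block_rsv(fs_info, rsv);
	return ret;
}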

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes,
				    u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}

u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are a delayed block reserve then push to the global rsv,
	 * otherwise dump into the global delayed reserve if it is not full.
	 */
	if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We also are going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
		min_items++;
	}

	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
		min_items++;
	}

	/*
	 * But we also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * But we also need space for the delayed ref updates from the unlink,
	 * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
	 * each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
					       BTRFS_UNLINK_METADATA_UNITS));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (btrfs_root_id(root)) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums && btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   blocksize, BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}
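
/*
 * Illustrative sketch (not part of the upstream file): the ->failfast pattern
 * described in the header comment and implemented just above.  A
 * truncate-style caller keeps working out of a temporary reserve and, when
 * the allocation path fails fast with -ENOSPC, returns the leftovers and
 * re-reserves before continuing.  The work step is a caller-supplied callback
 * here purely so the sketch stands on its own.
 */
static int __maybe_unused example_failfast_loop(struct btrfs_fs_info *fs_info,
						struct btrfs_block_rsv *rsv,
						int (*do_work)(struct btrfs_block_rsv *rsv))
{
	int ret;

	rsv->failfast = true;
	while (1) {
		/* Top the reserve up to one tree block before each pass. */
		ret = btrfs_block_rsv_refill(fs_info, rsv, fs_info->nodesize,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			break;

		/* One bounded pass of work charged to @rsv. */
		ret = do_work(rsv);

		/*
		 * -ENOSPC is the failfast signal from btrfs_use_block_rsv():
		 * the reserve ran dry, so return the excess and loop to
		 * re-reserve; any other result ends the loop.
		 */
		if (ret != -ENOSPC)
			break;
		btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	}
	return ret;
}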

int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}