   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/pagemap.h>
   8#include <linux/writeback.h>
   9#include <linux/blkdev.h>
  10#include <linux/rbtree.h>
  11#include <linux/slab.h>
  12#include <linux/workqueue.h>
  13#include <linux/btrfs.h>
  14#include <linux/sched/mm.h>
  15
  16#include "ctree.h"
  17#include "transaction.h"
  18#include "disk-io.h"
  19#include "locking.h"
  20#include "ulist.h"
  21#include "backref.h"
  22#include "extent_io.h"
  23#include "qgroup.h"
  24#include "block-group.h"
  25#include "sysfs.h"
  26#include "tree-mod-log.h"
  27#include "fs.h"
  28#include "accessors.h"
  29#include "extent-tree.h"
  30#include "root-tree.h"
  31#include "tree-checker.h"
  32
  33enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
  34{
  35	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
  36		return BTRFS_QGROUP_MODE_DISABLED;
  37	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
  38		return BTRFS_QGROUP_MODE_SIMPLE;
  39	return BTRFS_QGROUP_MODE_FULL;
  40}
  41
  42bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
  43{
  44	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
  45}
  46
  47bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
  48{
  49	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
  50}
  51
  52/*
  53 * Helpers to access qgroup reservation
  54 *
  55 * Callers should ensure the lock context and type are valid
  56 */
  57
  58static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
  59{
  60	u64 ret = 0;
  61	int i;
  62
  63	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
  64		ret += qgroup->rsv.values[i];
  65
  66	return ret;
  67}
  68
  69#ifdef CONFIG_BTRFS_DEBUG
  70static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
  71{
  72	if (type == BTRFS_QGROUP_RSV_DATA)
  73		return "data";
  74	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
  75		return "meta_pertrans";
  76	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
  77		return "meta_prealloc";
  78	return NULL;
  79}
  80#endif
  81
  82static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
  83			   struct btrfs_qgroup *qgroup, u64 num_bytes,
  84			   enum btrfs_qgroup_rsv_type type)
  85{
  86	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
  87	qgroup->rsv.values[type] += num_bytes;
  88}
  89
  90static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
  91			       struct btrfs_qgroup *qgroup, u64 num_bytes,
  92			       enum btrfs_qgroup_rsv_type type)
  93{
  94	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
  95	if (qgroup->rsv.values[type] >= num_bytes) {
  96		qgroup->rsv.values[type] -= num_bytes;
  97		return;
  98	}
  99#ifdef CONFIG_BTRFS_DEBUG
 100	WARN_RATELIMIT(1,
 101		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
 102		qgroup->qgroupid, qgroup_rsv_type_str(type),
 103		qgroup->rsv.values[type], num_bytes);
 104#endif
 105	qgroup->rsv.values[type] = 0;
 106}
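
/*
 * Illustrative sketch of the two helpers above (not called anywhere,
 * sizes invented): the reservations are plain per-type u64 counters
 * that clamp on underflow.
 *
 *	qgroup_rsv_add(fs_info, qgroup, SZ_1M, BTRFS_QGROUP_RSV_DATA);
 *		-> rsv.values[BTRFS_QGROUP_RSV_DATA] == 1M
 *	qgroup_rsv_release(fs_info, qgroup, SZ_2M, BTRFS_QGROUP_RSV_DATA);
 *		-> over-release clamps the counter to 0 (and warns on
 *		   CONFIG_BTRFS_DEBUG builds)
 */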
 107
 108static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
 109				     struct btrfs_qgroup *dest,
 110				     const struct btrfs_qgroup *src)
 111{
 112	int i;
 113
 114	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
 115		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
 116}
 117
 118static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
 119					 struct btrfs_qgroup *dest,
 120					 const struct btrfs_qgroup *src)
 121{
 122	int i;
 123
 124	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
 125		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
 126}
 127
 128static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
 129					   int mod)
 130{
 131	if (qg->old_refcnt < seq)
 132		qg->old_refcnt = seq;
 133	qg->old_refcnt += mod;
 134}
 135
 136static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
 137					   int mod)
 138{
 139	if (qg->new_refcnt < seq)
 140		qg->new_refcnt = seq;
 141	qg->new_refcnt += mod;
 142}
 143
 144static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
 145{
 146	if (qg->old_refcnt < seq)
 147		return 0;
 148	return qg->old_refcnt - seq;
 149}
 150
 151static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
 152{
 153	if (qg->new_refcnt < seq)
 154		return 0;
 155	return qg->new_refcnt - seq;
 156}
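
/*
 * Worked example for the seq-based refcnts above (values invented): with
 * a fresh sequence seq == 100 and qg->old_refcnt holding a stale 42 from
 * an earlier accounting round, btrfs_qgroup_update_old_refcnt(qg, 100, 1)
 * first snaps old_refcnt up to 100 and then adds 1, so
 * btrfs_qgroup_get_old_refcnt(qg, 100) returns 1.  The lazy snap avoids
 * having to walk every qgroup to reset the counters between rounds.
 */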
 157
 158static int
 159qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 160		   int init_flags);
 161static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
 162
 163/* must be called with qgroup_ioctl_lock held */
 164static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
 165					   u64 qgroupid)
 166{
 167	struct rb_node *n = fs_info->qgroup_tree.rb_node;
 168	struct btrfs_qgroup *qgroup;
 169
 170	while (n) {
 171		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 172		if (qgroup->qgroupid < qgroupid)
 173			n = n->rb_right;
 174		else if (qgroup->qgroupid > qgroupid)
 175			n = n->rb_left;
 176		else
 177			return qgroup;
 178	}
 179	return NULL;
 180}
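
/*
 * Reminder on the key compared above: a qgroupid packs the level into the
 * top 16 bits and the subvolume id into the low 48 bits (cf.
 * btrfs_qgroup_level() and btrfs_qgroup_subvolid(), both used later in
 * this file), so e.g. "0/257" is plain 257 while "1/100" is
 * (1ULL << 48) | 100.  The rb-tree is ordered by this raw u64 value.
 */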
 181
 182/*
 183 * Add qgroup to the filesystem's qgroup tree.
 184 *
 185 * Must be called with qgroup_lock held and @prealloc preallocated.
 186 *
 187 * Control over the lifespan of @prealloc is transferred to this
 188 * function, thus the caller should no longer touch @prealloc.
 189 */
 190static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
 191					  struct btrfs_qgroup *prealloc,
 192					  u64 qgroupid)
 193{
 194	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
 195	struct rb_node *parent = NULL;
 196	struct btrfs_qgroup *qgroup;
 197
 198	/* Caller must have pre-allocated @prealloc. */
 199	ASSERT(prealloc);
 200
 201	while (*p) {
 202		parent = *p;
 203		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
 204
 205		if (qgroup->qgroupid < qgroupid) {
 206			p = &(*p)->rb_right;
 207		} else if (qgroup->qgroupid > qgroupid) {
 208			p = &(*p)->rb_left;
 209		} else {
 210			kfree(prealloc);
 211			return qgroup;
 212		}
 213	}
 214
 215	qgroup = prealloc;
 216	qgroup->qgroupid = qgroupid;
 217	INIT_LIST_HEAD(&qgroup->groups);
 218	INIT_LIST_HEAD(&qgroup->members);
 219	INIT_LIST_HEAD(&qgroup->dirty);
 220	INIT_LIST_HEAD(&qgroup->iterator);
 221	INIT_LIST_HEAD(&qgroup->nested_iterator);
 222
 223	rb_link_node(&qgroup->node, parent, p);
 224	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
 225
 226	return qgroup;
 227}
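
/*
 * Typical calling pattern for add_qgroup_rb(), condensed from
 * btrfs_create_qgroup() below: allocate outside the spinlock, then hand
 * the allocation over unconditionally:
 *
 *	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
 *	if (!prealloc)
 *		return -ENOMEM;
 *	spin_lock(&fs_info->qgroup_lock);
 *	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
 *	spin_unlock(&fs_info->qgroup_lock);
 *	prealloc = NULL;	(consumed: inserted, or freed if the id existed)
 */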
 228
 229static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
 230{
 231	struct btrfs_qgroup_list *list;
 232
 233	list_del(&qgroup->dirty);
 234	while (!list_empty(&qgroup->groups)) {
 235		list = list_first_entry(&qgroup->groups,
 236					struct btrfs_qgroup_list, next_group);
 237		list_del(&list->next_group);
 238		list_del(&list->next_member);
 239		kfree(list);
 240	}
 241
 242	while (!list_empty(&qgroup->members)) {
 243		list = list_first_entry(&qgroup->members,
 244					struct btrfs_qgroup_list, next_member);
 245		list_del(&list->next_group);
 246		list_del(&list->next_member);
 247		kfree(list);
 248	}
 249}
 250
 251/* must be called with qgroup_lock held */
 252static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
 253{
 254	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
 255
 256	if (!qgroup)
 257		return -ENOENT;
 258
 259	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
 260	__del_qgroup_rb(qgroup);
 261	return 0;
 262}
 263
 264/*
 265 * Add relation specified by two qgroups.
 266 *
 267 * Must be called with qgroup_lock held, the ownership of @prealloc is
 268 * transferred to this function and caller should not touch it anymore.
 269 *
 270 * Return: 0        on success
 271 *         -ENOENT  if one of the qgroups is NULL
 272 *         <0       other errors
 273 */
 274static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
 275			     struct btrfs_qgroup *member,
 276			     struct btrfs_qgroup *parent)
 277{
 278	if (!member || !parent) {
 279		kfree(prealloc);
 280		return -ENOENT;
 281	}
 282
 283	prealloc->group = parent;
 284	prealloc->member = member;
 285	list_add_tail(&prealloc->next_group, &member->groups);
 286	list_add_tail(&prealloc->next_member, &parent->members);
 287
 288	return 0;
 289}
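
/*
 * Note on the two list heads used above: each btrfs_qgroup_list node is
 * linked twice, via ->next_group into member->groups (the parents of
 * @member) and via ->next_member into parent->members (the children of
 * @parent).  That is why a single kfree() in del_relation_rb() below is
 * enough to drop the relation from both directions.
 */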
 290
 291/*
 292 * Add relation specified by two qgroup ids.
 293 *
 294 * Must be called with qgroup_lock held.
 295 *
 296 * Return: 0        on success
 297 *         -ENOENT  if one of the ids does not exist
 298 *         <0       other errors
 299 */
 300static int add_relation_rb(struct btrfs_fs_info *fs_info,
 301			   struct btrfs_qgroup_list *prealloc,
 302			   u64 memberid, u64 parentid)
 303{
 304	struct btrfs_qgroup *member;
 305	struct btrfs_qgroup *parent;
 306
 307	member = find_qgroup_rb(fs_info, memberid);
 308	parent = find_qgroup_rb(fs_info, parentid);
 309
 310	return __add_relation_rb(prealloc, member, parent);
 311}
 312
 313/* Must be called with qgroup_lock held */
 314static int del_relation_rb(struct btrfs_fs_info *fs_info,
 315			   u64 memberid, u64 parentid)
 316{
 317	struct btrfs_qgroup *member;
 318	struct btrfs_qgroup *parent;
 319	struct btrfs_qgroup_list *list;
 320
 321	member = find_qgroup_rb(fs_info, memberid);
 322	parent = find_qgroup_rb(fs_info, parentid);
 323	if (!member || !parent)
 324		return -ENOENT;
 325
 326	list_for_each_entry(list, &member->groups, next_group) {
 327		if (list->group == parent) {
 328			list_del(&list->next_group);
 329			list_del(&list->next_member);
 330			kfree(list);
 331			return 0;
 332		}
 333	}
 334	return -ENOENT;
 335}
 336
 337#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 338int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
 339			       u64 rfer, u64 excl)
 340{
 341	struct btrfs_qgroup *qgroup;
 342
 343	qgroup = find_qgroup_rb(fs_info, qgroupid);
 344	if (!qgroup)
 345		return -EINVAL;
 346	if (qgroup->rfer != rfer || qgroup->excl != excl)
 347		return -EINVAL;
 348	return 0;
 349}
 350#endif
 351
 352static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
 353{
 354	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
 355		return;
 356	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
 357				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
 358				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
 359}
 360
 361static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
 362				   struct extent_buffer *leaf, int slot,
 363				   struct btrfs_qgroup_status_item *ptr)
 364{
 365	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
 366	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
 367	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
 368}
 369
 370/*
 371 * The full config is read in one go, only called from open_ctree().
 372 * It doesn't use any locking, as at this point we're still single-threaded.
 373 */
 374int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 375{
 376	struct btrfs_key key;
 377	struct btrfs_key found_key;
 378	struct btrfs_root *quota_root = fs_info->quota_root;
 379	struct btrfs_path *path = NULL;
 380	struct extent_buffer *l;
 381	int slot;
 382	int ret = 0;
 383	u64 flags = 0;
 384	u64 rescan_progress = 0;
 385
 386	if (!fs_info->quota_root)
 387		return 0;
 388
 389	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 390	if (!fs_info->qgroup_ulist) {
 391		ret = -ENOMEM;
 392		goto out;
 393	}
 394
 395	path = btrfs_alloc_path();
 396	if (!path) {
 397		ret = -ENOMEM;
 398		goto out;
 399	}
 400
 401	ret = btrfs_sysfs_add_qgroups(fs_info);
 402	if (ret < 0)
 403		goto out;
 404	/* default this to quota off, in case no status key is found */
 405	fs_info->qgroup_flags = 0;
 406
 407	/*
 408	 * pass 1: read status, all qgroup infos and limits
 409	 */
 410	key.objectid = 0;
 411	key.type = 0;
 412	key.offset = 0;
 413	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
 414	if (ret)
 415		goto out;
 416
 417	while (1) {
 418		struct btrfs_qgroup *qgroup;
 419
 420		slot = path->slots[0];
 421		l = path->nodes[0];
 422		btrfs_item_key_to_cpu(l, &found_key, slot);
 423
 424		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
 425			struct btrfs_qgroup_status_item *ptr;
 426
 427			ptr = btrfs_item_ptr(l, slot,
 428					     struct btrfs_qgroup_status_item);
 429
 430			if (btrfs_qgroup_status_version(l, ptr) !=
 431			    BTRFS_QGROUP_STATUS_VERSION) {
 432				btrfs_err(fs_info,
 433				 "old qgroup version, quota disabled");
 434				goto out;
 435			}
 436			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
 437			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
 438				qgroup_read_enable_gen(fs_info, l, slot, ptr);
 439			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
 440				qgroup_mark_inconsistent(fs_info);
 441				btrfs_err(fs_info,
 442					"qgroup generation mismatch, marked as inconsistent");
 443			}
 444			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
 445			goto next1;
 446		}
 447
 448		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
 449		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
 450			goto next1;
 451
 452		qgroup = find_qgroup_rb(fs_info, found_key.offset);
 453		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 454		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
 455			btrfs_err(fs_info, "inconsistent qgroup config");
 456			qgroup_mark_inconsistent(fs_info);
 457		}
 458		if (!qgroup) {
 459			struct btrfs_qgroup *prealloc;
 460			struct btrfs_root *tree_root = fs_info->tree_root;
 461
 462			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 463			if (!prealloc) {
 464				ret = -ENOMEM;
 465				goto out;
 466			}
 467			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
 468			/*
 469			 * If a qgroup exists for a subvolume ID, it is possible
 470			 * that subvolume has been deleted, in which case
 471			 * reusing that ID would lead to incorrect accounting.
 472			 *
 473			 * Ensure that we skip any such subvol ids.
 474			 *
 475			 * We don't need to lock because this is only called
 476			 * during mount before we start doing things like creating
 477			 * subvolumes.
 478			 */
 479			if (is_fstree(qgroup->qgroupid) &&
 480			    qgroup->qgroupid > tree_root->free_objectid)
 481				/*
 482				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
 483				 * as it will get checked on the next call to
 484				 * btrfs_get_free_objectid.
 485				 */
 486				tree_root->free_objectid = qgroup->qgroupid + 1;
 487		}
 488		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
 489		if (ret < 0)
 490			goto out;
 491
 492		switch (found_key.type) {
 493		case BTRFS_QGROUP_INFO_KEY: {
 494			struct btrfs_qgroup_info_item *ptr;
 495
 496			ptr = btrfs_item_ptr(l, slot,
 497					     struct btrfs_qgroup_info_item);
 498			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
 499			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
 500			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
 501			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
 502			/* generation currently unused */
 503			break;
 504		}
 505		case BTRFS_QGROUP_LIMIT_KEY: {
 506			struct btrfs_qgroup_limit_item *ptr;
 507
 508			ptr = btrfs_item_ptr(l, slot,
 509					     struct btrfs_qgroup_limit_item);
 510			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
 511			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
 512			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
 513			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
 514			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
 515			break;
 516		}
 517		}
 518next1:
 519		ret = btrfs_next_item(quota_root, path);
 520		if (ret < 0)
 521			goto out;
 522		if (ret)
 523			break;
 524	}
 525	btrfs_release_path(path);
 526
 527	/*
 528	 * pass 2: read all qgroup relations
 529	 */
 530	key.objectid = 0;
 531	key.type = BTRFS_QGROUP_RELATION_KEY;
 532	key.offset = 0;
 533	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
 534	if (ret)
 535		goto out;
 536	while (1) {
 537		struct btrfs_qgroup_list *list = NULL;
 538
 539		slot = path->slots[0];
 540		l = path->nodes[0];
 541		btrfs_item_key_to_cpu(l, &found_key, slot);
 542
 543		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
 544			goto next2;
 545
 546		if (found_key.objectid > found_key.offset) {
 547			/* parent <- member, not needed to build config */
 548			/* FIXME should we omit the key completely? */
 549			goto next2;
 550		}
 551
 552		list = kzalloc(sizeof(*list), GFP_KERNEL);
 553		if (!list) {
 554			ret = -ENOMEM;
 555			goto out;
 556		}
 557		ret = add_relation_rb(fs_info, list, found_key.objectid,
 558				      found_key.offset);
 559		list = NULL;
 560		if (ret == -ENOENT) {
 561			btrfs_warn(fs_info,
 562				"orphan qgroup relation 0x%llx->0x%llx",
 563				found_key.objectid, found_key.offset);
 564			ret = 0;	/* ignore the error */
 565		}
 566		if (ret)
 567			goto out;
 568next2:
 569		ret = btrfs_next_item(quota_root, path);
 570		if (ret < 0)
 571			goto out;
 572		if (ret)
 573			break;
 574	}
 575out:
 576	btrfs_free_path(path);
 577	fs_info->qgroup_flags |= flags;
 578	if (ret >= 0) {
 579		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
 580			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 581		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
 582			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 583	} else {
 584		ulist_free(fs_info->qgroup_ulist);
 585		fs_info->qgroup_ulist = NULL;
 586		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 587		btrfs_sysfs_del_qgroups(fs_info);
 588	}
 589
 590	return ret < 0 ? ret : 0;
 591}
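
/*
 * On-disk key layout consumed by the two passes above (derived from the
 * search keys in this function and the item helpers further below):
 *
 *	(0,   BTRFS_QGROUP_STATUS_KEY,   0)        one global status item
 *	(0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage numbers per qgroup
 *	(0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits per qgroup
 *	(src, BTRFS_QGROUP_RELATION_KEY, dst)      stored once per direction;
 *	                                           pass 2 only uses src < dst
 */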
 592
 593/*
 594 * Called in close_ctree() when quota is still enabled.  This verifies we
 595 * don't leak any reserved space.
 596 *
 597 * Return false if no reserved space is left.
 598 * Return true if some reserved space is leaked.
 599 */
 600bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
 601{
 602	struct rb_node *node;
 603	bool ret = false;
 604
 605	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
 606		return ret;
 607	/*
 608	 * Since we're unmounting, there is no race and no need to grab qgroup
 609 * lock.  And here we don't go post-order, to provide a more user-friendly
 610 * sorted result.
 611	 */
 612	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
 613		struct btrfs_qgroup *qgroup;
 614		int i;
 615
 616		qgroup = rb_entry(node, struct btrfs_qgroup, node);
 617		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
 618			if (qgroup->rsv.values[i]) {
 619				ret = true;
 620				btrfs_warn(fs_info,
 621		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
 622				   btrfs_qgroup_level(qgroup->qgroupid),
 623				   btrfs_qgroup_subvolid(qgroup->qgroupid),
 624				   i, qgroup->rsv.values[i]);
 625			}
 626		}
 627	}
 628	return ret;
 629}
 630
 631/*
 632 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 633 * the first two are in single-threaded paths.  And for the third one, we
 634 * have already set quota_root to NULL with qgroup_lock held, so it is safe
 635 * to clean up the in-memory structures without qgroup_lock held.
 636 */
 637void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 638{
 639	struct rb_node *n;
 640	struct btrfs_qgroup *qgroup;
 641
 642	while ((n = rb_first(&fs_info->qgroup_tree))) {
 643		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 644		rb_erase(n, &fs_info->qgroup_tree);
 645		__del_qgroup_rb(qgroup);
 646		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
 647		kfree(qgroup);
 648	}
 649	/*
 650	 * We call btrfs_free_qgroup_config() when unmounting
 651	 * filesystem and disabling quota, so we set qgroup_ulist
 652	 * to be null here to avoid double free.
 653	 */
 654	ulist_free(fs_info->qgroup_ulist);
 655	fs_info->qgroup_ulist = NULL;
 656	btrfs_sysfs_del_qgroups(fs_info);
 657}
 658
 659static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
 660				    u64 dst)
 661{
 662	int ret;
 663	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 664	struct btrfs_path *path;
 665	struct btrfs_key key;
 666
 667	path = btrfs_alloc_path();
 668	if (!path)
 669		return -ENOMEM;
 670
 671	key.objectid = src;
 672	key.type = BTRFS_QGROUP_RELATION_KEY;
 673	key.offset = dst;
 674
 675	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
 676
 677	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
 678
 679	btrfs_free_path(path);
 680	return ret;
 681}
 682
 683static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
 684				    u64 dst)
 685{
 686	int ret;
 687	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 688	struct btrfs_path *path;
 689	struct btrfs_key key;
 690
 691	path = btrfs_alloc_path();
 692	if (!path)
 693		return -ENOMEM;
 694
 695	key.objectid = src;
 696	key.type = BTRFS_QGROUP_RELATION_KEY;
 697	key.offset = dst;
 698
 699	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 700	if (ret < 0)
 701		goto out;
 702
 703	if (ret > 0) {
 704		ret = -ENOENT;
 705		goto out;
 706	}
 707
 708	ret = btrfs_del_item(trans, quota_root, path);
 709out:
 710	btrfs_free_path(path);
 711	return ret;
 712}
 713
 714static int add_qgroup_item(struct btrfs_trans_handle *trans,
 715			   struct btrfs_root *quota_root, u64 qgroupid)
 716{
 717	int ret;
 718	struct btrfs_path *path;
 719	struct btrfs_qgroup_info_item *qgroup_info;
 720	struct btrfs_qgroup_limit_item *qgroup_limit;
 721	struct extent_buffer *leaf;
 722	struct btrfs_key key;
 723
 724	if (btrfs_is_testing(quota_root->fs_info))
 725		return 0;
 726
 727	path = btrfs_alloc_path();
 728	if (!path)
 729		return -ENOMEM;
 730
 731	key.objectid = 0;
 732	key.type = BTRFS_QGROUP_INFO_KEY;
 733	key.offset = qgroupid;
 734
 735	/*
 736	 * Avoid a transaction abort by catching -EEXIST here. In that
 737	 * case, we proceed by re-initializing the existing structure
 738	 * on disk.
 739	 */
 740
 741	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 742				      sizeof(*qgroup_info));
 743	if (ret && ret != -EEXIST)
 744		goto out;
 745
 746	leaf = path->nodes[0];
 747	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
 748				 struct btrfs_qgroup_info_item);
 749	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
 750	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
 751	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
 752	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
 753	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
 754
 755	btrfs_mark_buffer_dirty(trans, leaf);
 756
 757	btrfs_release_path(path);
 758
 759	key.type = BTRFS_QGROUP_LIMIT_KEY;
 760	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 761				      sizeof(*qgroup_limit));
 762	if (ret && ret != -EEXIST)
 763		goto out;
 764
 765	leaf = path->nodes[0];
 766	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
 767				  struct btrfs_qgroup_limit_item);
 768	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
 769	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
 770	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
 771	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
 772	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
 773
 774	btrfs_mark_buffer_dirty(trans, leaf);
 775
 776	ret = 0;
 777out:
 778	btrfs_free_path(path);
 779	return ret;
 780}
 781
 782static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
 783{
 784	int ret;
 785	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 786	struct btrfs_path *path;
 787	struct btrfs_key key;
 788
 789	path = btrfs_alloc_path();
 790	if (!path)
 791		return -ENOMEM;
 792
 793	key.objectid = 0;
 794	key.type = BTRFS_QGROUP_INFO_KEY;
 795	key.offset = qgroupid;
 796	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 797	if (ret < 0)
 798		goto out;
 799
 800	if (ret > 0) {
 801		ret = -ENOENT;
 802		goto out;
 803	}
 804
 805	ret = btrfs_del_item(trans, quota_root, path);
 806	if (ret)
 807		goto out;
 808
 809	btrfs_release_path(path);
 810
 811	key.type = BTRFS_QGROUP_LIMIT_KEY;
 812	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 813	if (ret < 0)
 814		goto out;
 815
 816	if (ret > 0) {
 817		ret = -ENOENT;
 818		goto out;
 819	}
 820
 821	ret = btrfs_del_item(trans, quota_root, path);
 822
 823out:
 824	btrfs_free_path(path);
 825	return ret;
 826}
 827
 828static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 829				    struct btrfs_qgroup *qgroup)
 830{
 831	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 832	struct btrfs_path *path;
 833	struct btrfs_key key;
 834	struct extent_buffer *l;
 835	struct btrfs_qgroup_limit_item *qgroup_limit;
 836	int ret;
 837	int slot;
 838
 839	key.objectid = 0;
 840	key.type = BTRFS_QGROUP_LIMIT_KEY;
 841	key.offset = qgroup->qgroupid;
 842
 843	path = btrfs_alloc_path();
 844	if (!path)
 845		return -ENOMEM;
 846
 847	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 848	if (ret > 0)
 849		ret = -ENOENT;
 850
 851	if (ret)
 852		goto out;
 853
 854	l = path->nodes[0];
 855	slot = path->slots[0];
 856	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
 857	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
 858	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
 859	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
 860	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
 861	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
 862
 863	btrfs_mark_buffer_dirty(trans, l);
 864
 865out:
 866	btrfs_free_path(path);
 867	return ret;
 868}
 869
 870static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 871				   struct btrfs_qgroup *qgroup)
 872{
 873	struct btrfs_fs_info *fs_info = trans->fs_info;
 874	struct btrfs_root *quota_root = fs_info->quota_root;
 875	struct btrfs_path *path;
 876	struct btrfs_key key;
 877	struct extent_buffer *l;
 878	struct btrfs_qgroup_info_item *qgroup_info;
 879	int ret;
 880	int slot;
 881
 882	if (btrfs_is_testing(fs_info))
 883		return 0;
 884
 885	key.objectid = 0;
 886	key.type = BTRFS_QGROUP_INFO_KEY;
 887	key.offset = qgroup->qgroupid;
 888
 889	path = btrfs_alloc_path();
 890	if (!path)
 891		return -ENOMEM;
 892
 893	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 894	if (ret > 0)
 895		ret = -ENOENT;
 896
 897	if (ret)
 898		goto out;
 899
 900	l = path->nodes[0];
 901	slot = path->slots[0];
 902	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
 903	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
 904	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
 905	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
 906	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
 907	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
 908
 909	btrfs_mark_buffer_dirty(trans, l);
 910
 911out:
 912	btrfs_free_path(path);
 913	return ret;
 914}
 915
 916static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
 917{
 918	struct btrfs_fs_info *fs_info = trans->fs_info;
 919	struct btrfs_root *quota_root = fs_info->quota_root;
 920	struct btrfs_path *path;
 921	struct btrfs_key key;
 922	struct extent_buffer *l;
 923	struct btrfs_qgroup_status_item *ptr;
 924	int ret;
 925	int slot;
 926
 927	key.objectid = 0;
 928	key.type = BTRFS_QGROUP_STATUS_KEY;
 929	key.offset = 0;
 930
 931	path = btrfs_alloc_path();
 932	if (!path)
 933		return -ENOMEM;
 934
 935	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 936	if (ret > 0)
 937		ret = -ENOENT;
 938
 939	if (ret)
 940		goto out;
 941
 942	l = path->nodes[0];
 943	slot = path->slots[0];
 944	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
 945	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
 946				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
 947	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
 948	btrfs_set_qgroup_status_rescan(l, ptr,
 949				fs_info->qgroup_rescan_progress.objectid);
 950
 951	btrfs_mark_buffer_dirty(trans, l);
 952
 953out:
 954	btrfs_free_path(path);
 955	return ret;
 956}
 957
 958/*
 959 * called with qgroup_lock held
 960 */
 961static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 962				  struct btrfs_root *root)
 963{
 964	struct btrfs_path *path;
 965	struct btrfs_key key;
 966	struct extent_buffer *leaf = NULL;
 967	int ret;
 968	int nr = 0;
 969
 970	path = btrfs_alloc_path();
 971	if (!path)
 972		return -ENOMEM;
 973
 974	key.objectid = 0;
 975	key.offset = 0;
 976	key.type = 0;
 977
 978	while (1) {
 979		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 980		if (ret < 0)
 981			goto out;
 982		leaf = path->nodes[0];
 983		nr = btrfs_header_nritems(leaf);
 984		if (!nr)
 985			break;
 986		/*
 987		 * Delete the leaves one at a time,
 988		 * since the whole tree is going
 989		 * to be deleted.
 990		 */
 991		path->slots[0] = 0;
 992		ret = btrfs_del_items(trans, root, path, 0, nr);
 993		if (ret)
 994			goto out;
 995
 996		btrfs_release_path(path);
 997	}
 998	ret = 0;
 999out:
1000	btrfs_free_path(path);
1001	return ret;
1002}
1003
1004int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
1005		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
1006{
1007	struct btrfs_root *quota_root;
1008	struct btrfs_root *tree_root = fs_info->tree_root;
1009	struct btrfs_path *path = NULL;
1010	struct btrfs_qgroup_status_item *ptr;
1011	struct extent_buffer *leaf;
1012	struct btrfs_key key;
1013	struct btrfs_key found_key;
1014	struct btrfs_qgroup *qgroup = NULL;
1015	struct btrfs_qgroup *prealloc = NULL;
1016	struct btrfs_trans_handle *trans = NULL;
1017	struct ulist *ulist = NULL;
1018	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1019	int ret = 0;
1020	int slot;
1021
1022	/*
1023	 * We need to have subvol_sem write locked, to prevent races between
1024	 * concurrent tasks trying to enable quotas, because we will unlock
1025	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1026	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1027	 */
1028	lockdep_assert_held_write(&fs_info->subvol_sem);
1029
1030	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1031		btrfs_err(fs_info,
1032			  "qgroups are currently unsupported in extent tree v2");
1033		return -EINVAL;
1034	}
1035
1036	mutex_lock(&fs_info->qgroup_ioctl_lock);
1037	if (fs_info->quota_root)
1038		goto out;
1039
1040	ulist = ulist_alloc(GFP_KERNEL);
1041	if (!ulist) {
1042		ret = -ENOMEM;
1043		goto out;
1044	}
1045
1046	ret = btrfs_sysfs_add_qgroups(fs_info);
1047	if (ret < 0)
1048		goto out;
1049
1050	/*
1051	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1052	 * avoid lock acquisition inversion problems (reported by lockdep) between
1053	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1054	 * start a transaction.
1055	 * After starting the transaction, lock qgroup_ioctl_lock again and
1056	 * check if someone else created the quota root in the meanwhile. If so,
1057	 * just return success and release the transaction handle.
1058	 *
1059	 * Also we don't need to worry about someone else calling
1060	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1061	 * that function returns 0 (success) when the sysfs entries already exist.
1062	 */
1063	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1064
1065	/*
1066	 * 1 for quota root item
1067	 * 1 for BTRFS_QGROUP_STATUS item
1068	 *
1069	 * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1070	 * per subvolume. However those are not currently reserved since it
1071	 * would be a lot of overkill.
1072	 */
1073	trans = btrfs_start_transaction(tree_root, 2);
1074
1075	mutex_lock(&fs_info->qgroup_ioctl_lock);
1076	if (IS_ERR(trans)) {
1077		ret = PTR_ERR(trans);
1078		trans = NULL;
1079		goto out;
1080	}
1081
1082	if (fs_info->quota_root)
1083		goto out;
1084
1085	fs_info->qgroup_ulist = ulist;
1086	ulist = NULL;
1087
1088	/*
1089	 * initially create the quota tree
1090	 */
1091	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1092	if (IS_ERR(quota_root)) {
1093		ret = PTR_ERR(quota_root);
1094		btrfs_abort_transaction(trans, ret);
1095		goto out;
1096	}
1097
1098	path = btrfs_alloc_path();
1099	if (!path) {
1100		ret = -ENOMEM;
1101		btrfs_abort_transaction(trans, ret);
1102		goto out_free_root;
1103	}
1104
1105	key.objectid = 0;
1106	key.type = BTRFS_QGROUP_STATUS_KEY;
1107	key.offset = 0;
1108
1109	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1110				      sizeof(*ptr));
1111	if (ret) {
1112		btrfs_abort_transaction(trans, ret);
1113		goto out_free_path;
1114	}
1115
1116	leaf = path->nodes[0];
1117	ptr = btrfs_item_ptr(leaf, path->slots[0],
1118				 struct btrfs_qgroup_status_item);
1119	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1120	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1121	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1122	if (simple) {
1123		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1124		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1125		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1126	} else {
1127		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1128	}
1129	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1130				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1131	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1132
1133	btrfs_mark_buffer_dirty(trans, leaf);
1134
1135	key.objectid = 0;
1136	key.type = BTRFS_ROOT_REF_KEY;
1137	key.offset = 0;
1138
1139	btrfs_release_path(path);
1140	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1141	if (ret > 0)
1142		goto out_add_root;
1143	if (ret < 0) {
1144		btrfs_abort_transaction(trans, ret);
1145		goto out_free_path;
1146	}
1147
1148	while (1) {
1149		slot = path->slots[0];
1150		leaf = path->nodes[0];
1151		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1152
1153		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1154
1155			/* Release locks on tree_root before we access quota_root */
1156			btrfs_release_path(path);
1157
1158			/* We should not have a stray @prealloc pointer. */
1159			ASSERT(prealloc == NULL);
1160			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1161			if (!prealloc) {
1162				ret = -ENOMEM;
1163				btrfs_abort_transaction(trans, ret);
1164				goto out_free_path;
1165			}
1166
1167			ret = add_qgroup_item(trans, quota_root,
1168					      found_key.offset);
1169			if (ret) {
1170				btrfs_abort_transaction(trans, ret);
1171				goto out_free_path;
1172			}
1173
1174			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1175			prealloc = NULL;
1176			if (IS_ERR(qgroup)) {
1177				ret = PTR_ERR(qgroup);
1178				btrfs_abort_transaction(trans, ret);
1179				goto out_free_path;
1180			}
1181			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1182			if (ret < 0) {
1183				btrfs_abort_transaction(trans, ret);
1184				goto out_free_path;
1185			}
1186			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1187							 path, 1, 0);
1188			if (ret < 0) {
1189				btrfs_abort_transaction(trans, ret);
1190				goto out_free_path;
1191			}
1192			if (ret > 0) {
1193				/*
1194				 * Shouldn't happen, but in case it does we
1195				 * don't need to do the btrfs_next_item, just
1196				 * continue.
1197				 */
1198				continue;
1199			}
1200		}
1201		ret = btrfs_next_item(tree_root, path);
1202		if (ret < 0) {
1203			btrfs_abort_transaction(trans, ret);
1204			goto out_free_path;
1205		}
1206		if (ret)
1207			break;
1208	}
1209
1210out_add_root:
1211	btrfs_release_path(path);
1212	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1213	if (ret) {
1214		btrfs_abort_transaction(trans, ret);
1215		goto out_free_path;
1216	}
1217
1218	ASSERT(prealloc == NULL);
1219	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1220	if (!prealloc) {
1221		ret = -ENOMEM;
1222		goto out_free_path;
1223	}
1224	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1225	prealloc = NULL;
1226	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1227	if (ret < 0) {
1228		btrfs_abort_transaction(trans, ret);
1229		goto out_free_path;
1230	}
1231
1232	fs_info->qgroup_enable_gen = trans->transid;
1233
1234	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1235	/*
1236	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1237	 * a deadlock with tasks concurrently doing other qgroup operations, such
1238	 * as adding/removing qgroups or adding/deleting qgroup relations for example,
1239	 * because all qgroup operations first start or join a transaction and then
1240	 * lock the qgroup_ioctl_lock mutex.
1241	 * We are safe from a concurrent task trying to enable quotas, by calling
1242	 * this function, since we are serialized by fs_info->subvol_sem.
1243	 */
1244	ret = btrfs_commit_transaction(trans);
1245	trans = NULL;
1246	mutex_lock(&fs_info->qgroup_ioctl_lock);
1247	if (ret)
1248		goto out_free_path;
1249
1250	/*
1251	 * Set quota enabled flag after committing the transaction, to avoid
1252	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1253	 * creation.
1254	 */
1255	spin_lock(&fs_info->qgroup_lock);
1256	fs_info->quota_root = quota_root;
1257	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1258	spin_unlock(&fs_info->qgroup_lock);
1259
1260	/* Skip rescan for simple qgroups. */
1261	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1262		goto out_free_path;
1263
1264	ret = qgroup_rescan_init(fs_info, 0, 1);
1265	if (!ret) {
1266		qgroup_rescan_zero_tracking(fs_info);
1267		fs_info->qgroup_rescan_running = true;
1268		btrfs_queue_work(fs_info->qgroup_rescan_workers,
1269				 &fs_info->qgroup_rescan_work);
1270	} else {
1271		/*
1272		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1273		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1274		 * -EINPROGRESS. That can happen because someone started the
1275		 * rescan worker by calling quota rescan ioctl before we
1276		 * attempted to initialize the rescan worker. Failure due to
1277		 * quotas disabled in the meanwhile is not possible, because
1278		 * we are holding a write lock on fs_info->subvol_sem, which
1279		 * is also acquired when disabling quotas.
1280		 * Ignore such error, and any other error would need to undo
1281		 * everything we did in the transaction we just committed.
1282		 */
1283		ASSERT(ret == -EINPROGRESS);
1284		ret = 0;
1285	}
1286
1287out_free_path:
1288	btrfs_free_path(path);
1289out_free_root:
1290	if (ret)
1291		btrfs_put_root(quota_root);
1292out:
1293	if (ret) {
1294		ulist_free(fs_info->qgroup_ulist);
1295		fs_info->qgroup_ulist = NULL;
1296		btrfs_sysfs_del_qgroups(fs_info);
1297	}
1298	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1299	if (ret && trans)
1300		btrfs_end_transaction(trans);
1301	else if (trans)
1302		ret = btrfs_end_transaction(trans);
1303	ulist_free(ulist);
1304	kfree(prealloc);
1305	return ret;
1306}
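
/*
 * Lock ordering cheat sheet for quota enable/disable, summarizing the
 * comments in btrfs_quota_enable() above and btrfs_quota_disable() below:
 *
 *	down_write(&fs_info->subvol_sem);		(taken by the caller)
 *	trans = btrfs_start_transaction(...);		(before the mutex!)
 *	mutex_lock(&fs_info->qgroup_ioctl_lock);
 *	...
 *	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 *	btrfs_commit_transaction(trans);		(after unlocking)
 *
 * Starting or committing a transaction while holding qgroup_ioctl_lock
 * would invert the lock order against the vfs freeze semaphores and
 * against other qgroup ioctls.
 */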
1307
1308/*
1309 * It is possible to have outstanding ordered extents which reserved bytes
1310 * before quotas were disabled. We need to fully flush delalloc, ordered
1311 * extents, and a commit to ensure that we don't leak such reservations,
1312 * only to have them come back if we re-enable quotas.
1313 *
1314 * - enable simple quotas
1315 * - reserve space
1316 * - release it, store rsv_bytes in OE
1317 * - disable quotas
1318 * - enable simple quotas (qgroup rsv are all 0)
1319 * - OE finishes
1320 * - run delayed refs
1321 * - free rsv_bytes, resulting in miscounting or even underflow
1322 */
1323static int flush_reservations(struct btrfs_fs_info *fs_info)
1324{
1325	int ret;
1326
1327	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1328	if (ret)
1329		return ret;
1330	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1331
1332	return btrfs_commit_current_transaction(fs_info->tree_root);
1333}
1334
1335int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1336{
1337	struct btrfs_root *quota_root = NULL;
1338	struct btrfs_trans_handle *trans = NULL;
1339	int ret = 0;
1340
1341	/*
1342	 * We need to have subvol_sem write locked to prevent races with
1343	 * snapshot creation.
1344	 */
1345	lockdep_assert_held_write(&fs_info->subvol_sem);
1346
1347	/*
1348	 * Relocation will mess with backrefs, so make sure we have the
1349	 * cleaner_mutex held to protect us from relocate.
1350	 */
1351	lockdep_assert_held(&fs_info->cleaner_mutex);
1352
1353	mutex_lock(&fs_info->qgroup_ioctl_lock);
1354	if (!fs_info->quota_root)
1355		goto out;
1356
1357	/*
1358	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1359	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1360	 * to lock that mutex while holding a transaction handle and the rescan
1361	 * worker needs to commit a transaction.
1362	 */
1363	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1364
1365	/*
1366	 * Request qgroup rescan worker to complete and wait for it. This wait
1367	 * must be done before transaction start for quota disable since it may
1368	 * deadlock with transaction by the qgroup rescan worker.
1369	 */
1370	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1371	btrfs_qgroup_wait_for_completion(fs_info, false);
1372
1373	/*
1374	 * We have nothing held here and no trans handle, just return the error
1375	 * if there is one.
1376	 */
1377	ret = flush_reservations(fs_info);
1378	if (ret)
1379		return ret;
1380
1381	/*
1382	 * 1 For the root item
1383	 *
1384	 * We should also reserve enough items for the quota tree deletion in
1385	 * btrfs_clean_quota_tree but this is not done.
1386	 *
1387	 * Also, we must always start a transaction without holding the mutex
1388	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1389	 */
1390	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1391
1392	mutex_lock(&fs_info->qgroup_ioctl_lock);
1393	if (IS_ERR(trans)) {
1394		ret = PTR_ERR(trans);
1395		trans = NULL;
1396		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1397		goto out;
1398	}
1399
1400	if (!fs_info->quota_root)
1401		goto out;
1402
1403	spin_lock(&fs_info->qgroup_lock);
1404	quota_root = fs_info->quota_root;
1405	fs_info->quota_root = NULL;
1406	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1407	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1408	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1409	spin_unlock(&fs_info->qgroup_lock);
1410
1411	btrfs_free_qgroup_config(fs_info);
1412
1413	ret = btrfs_clean_quota_tree(trans, quota_root);
1414	if (ret) {
1415		btrfs_abort_transaction(trans, ret);
1416		goto out;
1417	}
1418
1419	ret = btrfs_del_root(trans, &quota_root->root_key);
1420	if (ret) {
1421		btrfs_abort_transaction(trans, ret);
1422		goto out;
1423	}
1424
1425	spin_lock(&fs_info->trans_lock);
1426	list_del(&quota_root->dirty_list);
1427	spin_unlock(&fs_info->trans_lock);
1428
1429	btrfs_tree_lock(quota_root->node);
1430	btrfs_clear_buffer_dirty(trans, quota_root->node);
1431	btrfs_tree_unlock(quota_root->node);
1432	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1433				    quota_root->node, 0, 1);
1434
1435	if (ret < 0)
1436		btrfs_abort_transaction(trans, ret);
1437
1438out:
1439	btrfs_put_root(quota_root);
1440	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1441	if (ret && trans)
1442		btrfs_end_transaction(trans);
1443	else if (trans)
1444		ret = btrfs_commit_transaction(trans);
1445	return ret;
1446}
1447
1448static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1449			 struct btrfs_qgroup *qgroup)
1450{
1451	if (list_empty(&qgroup->dirty))
1452		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1453}
1454
1455static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1456{
1457	if (!list_empty(&qgroup->iterator))
1458		return;
1459
1460	list_add_tail(&qgroup->iterator, head);
1461}
1462
1463static void qgroup_iterator_clean(struct list_head *head)
1464{
1465	while (!list_empty(head)) {
1466		struct btrfs_qgroup *qgroup;
1467
1468		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1469		list_del_init(&qgroup->iterator);
1470	}
1471}
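
/*
 * Usage sketch for the iterator helpers above (this is the shape of the
 * loop in __qgroup_excl_accounting() below): the list head is embedded in
 * struct btrfs_qgroup, so walking a whole hierarchy needs no memory
 * allocation, and the list_empty() check filters out duplicates:
 *
 *	LIST_HEAD(qgroup_list);
 *
 *	qgroup_iterator_add(&qgroup_list, qgroup);
 *	list_for_each_entry(cur, &qgroup_list, iterator) {
 *		struct btrfs_qgroup_list *glist;
 *
 *		(visit @cur, then queue all of its parents)
 *		list_for_each_entry(glist, &cur->groups, next_group)
 *			qgroup_iterator_add(&qgroup_list, glist->group);
 *	}
 *	qgroup_iterator_clean(&qgroup_list);
 */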
1472
1473/*
1474 * The easy accounting case: we're updating a qgroup relationship whose
1475 * child qgroup only has exclusive extents.
1476 *
1477 * In this case, all exclusive extents will also be exclusive for the
1478 * parent, so excl/rfer just get added/removed.
1479 *
1480 * The same goes for qgroup reservation space, which should also be
1481 * added/removed to the parent.
1482 * Otherwise, when the child tries to release reservation space, the parent
1483 * will underflow its reservation (for the relationship-adding case).
1484 *
1485 * Caller should hold fs_info->qgroup_lock.
1486 */
1487static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1488				    struct btrfs_qgroup *src, int sign)
1489{
1490	struct btrfs_qgroup *qgroup;
1491	struct btrfs_qgroup *cur;
1492	LIST_HEAD(qgroup_list);
1493	u64 num_bytes = src->excl;
1494	int ret = 0;
1495
1496	qgroup = find_qgroup_rb(fs_info, ref_root);
1497	if (!qgroup)
1498		goto out;
1499
1500	qgroup_iterator_add(&qgroup_list, qgroup);
1501	list_for_each_entry(cur, &qgroup_list, iterator) {
1502		struct btrfs_qgroup_list *glist;
1503
1504		cur->rfer += sign * num_bytes;
1505		cur->rfer_cmpr += sign * num_bytes;
1506
1507		WARN_ON(sign < 0 && cur->excl < num_bytes);
1508		cur->excl += sign * num_bytes;
1509		cur->excl_cmpr += sign * num_bytes;
1510
1511		if (sign > 0)
1512			qgroup_rsv_add_by_qgroup(fs_info, cur, src);
1513		else
1514			qgroup_rsv_release_by_qgroup(fs_info, cur, src);
1515		qgroup_dirty(fs_info, cur);
1516
1517		/* Append parent qgroups to @qgroup_list. */
1518		list_for_each_entry(glist, &cur->groups, next_group)
1519			qgroup_iterator_add(&qgroup_list, glist->group);
1520	}
1521	ret = 0;
1522out:
1523	qgroup_iterator_clean(&qgroup_list);
1524	return ret;
1525}
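
/*
 * Worked example for the exclusive-only fast path above (numbers
 * invented): child qgroup 0/257 with rfer == excl == 1M is assigned to
 * parent 1/100 (sign == 1).  The loop adds 1M to the parent's rfer/excl
 * and to those of every ancestor of the parent, and migrates the child's
 * per-type reservations up as well.  Removing the relation later repeats
 * the same walk with sign == -1.
 */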
1526
1527
1528/*
1529 * Quick path for updating qgroup with only excl refs.
1530 *
1531 * In that case, just updating all parents will be enough.
1532 * Otherwise we need to do a full rescan.
1533 * Caller should also hold fs_info->qgroup_lock.
1534 *
1535 * Return 0 for a quick update, return >0 if a full rescan is needed
1536 * (the INCONSISTENT flag gets set in that case).
1537 * Return <0 for other errors.
1538 */
1539static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1540				   u64 src, u64 dst, int sign)
1541{
1542	struct btrfs_qgroup *qgroup;
1543	int ret = 1;
1544
1545	qgroup = find_qgroup_rb(fs_info, src);
1546	if (!qgroup)
1547		goto out;
1548	if (qgroup->excl == qgroup->rfer) {
1549		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1550		if (ret < 0)
1551			goto out;
1552		ret = 0;
1553	}
1554out:
1555	if (ret)
1556		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1557	return ret;
1558}
1559
1560/*
1561 * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1562 * callers and transferred here (either used or freed on error).
1563 */
1564int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1565			      struct btrfs_qgroup_list *prealloc)
1566{
1567	struct btrfs_fs_info *fs_info = trans->fs_info;
1568	struct btrfs_qgroup *parent;
1569	struct btrfs_qgroup *member;
1570	struct btrfs_qgroup_list *list;
1571	int ret = 0;
1572
1573	ASSERT(prealloc);
1574
1575	/* Check the level of src and dst first */
1576	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1577		return -EINVAL;
1578
1579	mutex_lock(&fs_info->qgroup_ioctl_lock);
1580	if (!fs_info->quota_root) {
1581		ret = -ENOTCONN;
1582		goto out;
1583	}
1584	member = find_qgroup_rb(fs_info, src);
1585	parent = find_qgroup_rb(fs_info, dst);
1586	if (!member || !parent) {
1587		ret = -EINVAL;
1588		goto out;
1589	}
1590
1591	/* First check whether such a qgroup relation already exists. */
1592	list_for_each_entry(list, &member->groups, next_group) {
1593		if (list->group == parent) {
1594			ret = -EEXIST;
1595			goto out;
1596		}
1597	}
1598
1599	ret = add_qgroup_relation_item(trans, src, dst);
1600	if (ret)
1601		goto out;
1602
1603	ret = add_qgroup_relation_item(trans, dst, src);
1604	if (ret) {
1605		del_qgroup_relation_item(trans, src, dst);
1606		goto out;
1607	}
1608
1609	spin_lock(&fs_info->qgroup_lock);
1610	ret = __add_relation_rb(prealloc, member, parent);
1611	prealloc = NULL;
1612	if (ret < 0) {
1613		spin_unlock(&fs_info->qgroup_lock);
1614		goto out;
1615	}
1616	ret = quick_update_accounting(fs_info, src, dst, 1);
1617	spin_unlock(&fs_info->qgroup_lock);
1618out:
1619	kfree(prealloc);
1620	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1621	return ret;
1622}
1623
1624static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1625				 u64 dst)
1626{
1627	struct btrfs_fs_info *fs_info = trans->fs_info;
1628	struct btrfs_qgroup *parent;
1629	struct btrfs_qgroup *member;
1630	struct btrfs_qgroup_list *list;
1631	bool found = false;
1632	int ret = 0;
1633	int ret2;
1634
1635	if (!fs_info->quota_root) {
1636		ret = -ENOTCONN;
1637		goto out;
1638	}
1639
1640	member = find_qgroup_rb(fs_info, src);
1641	parent = find_qgroup_rb(fs_info, dst);
1642	/*
1643	 * If the parent/member pair doesn't exist, then just try to delete
1644	 * the dead relation items only.
1645	 */
1646	if (!member || !parent)
1647		goto delete_item;
1648
1649	/* check if such qgroup relation exist firstly */
1650	list_for_each_entry(list, &member->groups, next_group) {
1651		if (list->group == parent) {
1652			found = true;
1653			break;
1654		}
1655	}
1656
1657delete_item:
1658	ret = del_qgroup_relation_item(trans, src, dst);
1659	if (ret < 0 && ret != -ENOENT)
1660		goto out;
1661	ret2 = del_qgroup_relation_item(trans, dst, src);
1662	if (ret2 < 0 && ret2 != -ENOENT)
1663		goto out;
1664
1665	/* At least one deletion succeeded, return 0 */
1666	if (!ret || !ret2)
1667		ret = 0;
1668
1669	if (found) {
1670		spin_lock(&fs_info->qgroup_lock);
1671		del_relation_rb(fs_info, src, dst);
1672		ret = quick_update_accounting(fs_info, src, dst, -1);
1673		spin_unlock(&fs_info->qgroup_lock);
1674	}
1675out:
1676	return ret;
1677}
1678
1679int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1680			      u64 dst)
1681{
1682	struct btrfs_fs_info *fs_info = trans->fs_info;
1683	int ret = 0;
1684
1685	mutex_lock(&fs_info->qgroup_ioctl_lock);
1686	ret = __del_qgroup_relation(trans, src, dst);
1687	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1688
1689	return ret;
1690}
1691
1692int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1693{
1694	struct btrfs_fs_info *fs_info = trans->fs_info;
1695	struct btrfs_root *quota_root;
1696	struct btrfs_qgroup *qgroup;
1697	struct btrfs_qgroup *prealloc = NULL;
1698	int ret = 0;
1699
1700	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
1701		return 0;
1702
1703	mutex_lock(&fs_info->qgroup_ioctl_lock);
1704	if (!fs_info->quota_root) {
1705		ret = -ENOTCONN;
1706		goto out;
1707	}
1708	quota_root = fs_info->quota_root;
1709	qgroup = find_qgroup_rb(fs_info, qgroupid);
1710	if (qgroup) {
1711		ret = -EEXIST;
1712		goto out;
1713	}
1714
1715	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1716	if (!prealloc) {
1717		ret = -ENOMEM;
1718		goto out;
1719	}
1720
1721	ret = add_qgroup_item(trans, quota_root, qgroupid);
1722	if (ret)
1723		goto out;
1724
1725	spin_lock(&fs_info->qgroup_lock);
1726	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1727	spin_unlock(&fs_info->qgroup_lock);
1728	prealloc = NULL;
1729
1730	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1731out:
1732	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1733	kfree(prealloc);
1734	return ret;
1735}
1736
1737/*
1738 * Return 0 if we can not delete the qgroup (not empty or has children etc).
1739 * Return >0 if we can delete the qgroup.
1740 * Return <0 for other errors during tree search.
1741 */
1742static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1743{
1744	struct btrfs_key key;
1745	struct btrfs_path *path;
1746	int ret;
1747
1748	/*
1749	 * Squota would never be inconsistent, but there can still be the case
1750	 * where a dropped subvolume still has qgroup numbers, and squota
1751	 * relies on such a qgroup for future accounting.
1752	 *
1753	 * So for squota, do not allow dropping any non-zero qgroup.
1754	 */
1755	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1756	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1757		return 0;
1758
1759	/* For higher level qgroup, we can only delete it if it has no child. */
1760	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1761		if (!list_empty(&qgroup->members))
1762			return 0;
1763		return 1;
1764	}
1765
1766	/*
1767	 * A level-0 qgroup can only be deleted if there is no subvolume
1768	 * for it.
1769	 * This means that even if a subvolume is unlinked but not yet fully
1770	 * dropped, we can not delete the qgroup.
1771	 */
1772	key.objectid = qgroup->qgroupid;
1773	key.type = BTRFS_ROOT_ITEM_KEY;
1774	key.offset = -1ULL;
1775	path = btrfs_alloc_path();
1776	if (!path)
1777		return -ENOMEM;
1778
1779	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1780	btrfs_free_path(path);
1781	/*
1782	 * The @ret from btrfs_find_root() exactly matches our definition for
1783	 * the return value, thus can be returned directly.
1784	 */
1785	return ret;
1786}
1787
1788int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1789{
1790	struct btrfs_fs_info *fs_info = trans->fs_info;
1791	struct btrfs_qgroup *qgroup;
1792	struct btrfs_qgroup_list *list;
1793	int ret = 0;
1794
1795	mutex_lock(&fs_info->qgroup_ioctl_lock);
1796	if (!fs_info->quota_root) {
1797		ret = -ENOTCONN;
1798		goto out;
1799	}
1800
1801	qgroup = find_qgroup_rb(fs_info, qgroupid);
1802	if (!qgroup) {
1803		ret = -ENOENT;
1804		goto out;
1805	}
1806
1807	ret = can_delete_qgroup(fs_info, qgroup);
1808	if (ret < 0)
1809		goto out;
1810	if (ret == 0) {
1811		ret = -EBUSY;
1812		goto out;
1813	}
1814
1815	/* Check if there are no children of this qgroup */
1816	if (!list_empty(&qgroup->members)) {
1817		ret = -EBUSY;
1818		goto out;
1819	}
1820
1821	ret = del_qgroup_item(trans, qgroupid);
1822	if (ret && ret != -ENOENT)
1823		goto out;
1824
1825	while (!list_empty(&qgroup->groups)) {
1826		list = list_first_entry(&qgroup->groups,
1827					struct btrfs_qgroup_list, next_group);
1828		ret = __del_qgroup_relation(trans, qgroupid,
1829					    list->group->qgroupid);
1830		if (ret)
1831			goto out;
1832	}
1833
1834	spin_lock(&fs_info->qgroup_lock);
1835	/*
1836	 * Warn on reserved space. The qgroup should have no child nor
1837	 * corresponding subvolume.
1838	 * Thus its reserved space should all be zero, no matter whether the
1839	 * qgroup is consistent or which mode we are in.
1840	 */
1841	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1842	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1843	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1844		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1845		btrfs_warn_rl(fs_info,
1846"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1847			      btrfs_qgroup_level(qgroup->qgroupid),
1848			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1849			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1850			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1851			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1852
1853	}
1854	/*
1855	 * The same for rfer/excl numbers, but that's only if our qgroup is
1856	 * consistent and if it's in regular qgroup mode.
1857	 * For simple mode it's not as accurate thus we can hit non-zero values
1858	 * very frequently.
1859	 */
1860	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1861	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1862		if (qgroup->rfer || qgroup->excl ||
1863		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1864			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1865			btrfs_warn_rl(fs_info,
1866"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1867				      btrfs_qgroup_level(qgroup->qgroupid),
1868				      btrfs_qgroup_subvolid(qgroup->qgroupid),
1869				      qgroup->rfer, qgroup->rfer_cmpr,
1870				      qgroup->excl, qgroup->excl_cmpr);
1871			qgroup_mark_inconsistent(fs_info);
1872		}
1873	}
1874	del_qgroup_rb(fs_info, qgroupid);
1875	spin_unlock(&fs_info->qgroup_lock);
1876
1877	/*
1878	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1879	 * spinlock, since the sysfs_remove_group() function needs to take
1880	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1881	 */
1882	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1883	kfree(qgroup);
1884out:
1885	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1886	return ret;
1887}
1888
1889int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1890{
1891	struct btrfs_trans_handle *trans;
1892	int ret;
1893
1894	if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
1895		return 0;
1896
1897	/*
1898	 * Commit current transaction to make sure all the rfer/excl numbers
1899	 * get updated.
1900	 */
1901	trans = btrfs_start_transaction(fs_info->quota_root, 0);
1902	if (IS_ERR(trans))
1903		return PTR_ERR(trans);
1904
1905	ret = btrfs_commit_transaction(trans);
1906	if (ret < 0)
1907		return ret;
1908
1909	/* Start new trans to delete the qgroup info and limit items. */
1910	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1911	if (IS_ERR(trans))
1912		return PTR_ERR(trans);
1913	ret = btrfs_remove_qgroup(trans, subvolid);
1914	btrfs_end_transaction(trans);
1915	/*
1916	 * -EBUSY: it's squota mode and the subvolume still has numbers needed
1917	 * for future accounting; in this case we can not delete it, just skip it.
1918	 *
1919	 * -ENOENT: the qgroup was already removed, e.g. by a qgroup rescan.
1920	 * In both cases we're safe to ignore the error.
1921	 */
1922	if (ret == -EBUSY || ret == -ENOENT)
1923		ret = 0;
1924	return ret;
1925}
1926
1927int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1928		       struct btrfs_qgroup_limit *limit)
1929{
1930	struct btrfs_fs_info *fs_info = trans->fs_info;
1931	struct btrfs_qgroup *qgroup;
1932	int ret = 0;
1933	/* Sometimes we want to clear the limit on this qgroup.
1934	 * To meet this requirement, we treat -1 as a special value
1935	 * which tells the kernel to clear the limit on this qgroup.
1936	 */
1937	const u64 CLEAR_VALUE = -1;
1938
1939	mutex_lock(&fs_info->qgroup_ioctl_lock);
1940	if (!fs_info->quota_root) {
1941		ret = -ENOTCONN;
1942		goto out;
1943	}
1944
1945	qgroup = find_qgroup_rb(fs_info, qgroupid);
1946	if (!qgroup) {
1947		ret = -ENOENT;
1948		goto out;
1949	}
1950
1951	spin_lock(&fs_info->qgroup_lock);
1952	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1953		if (limit->max_rfer == CLEAR_VALUE) {
1954			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1955			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1956			qgroup->max_rfer = 0;
1957		} else {
1958			qgroup->max_rfer = limit->max_rfer;
1959		}
1960	}
1961	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1962		if (limit->max_excl == CLEAR_VALUE) {
1963			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1964			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1965			qgroup->max_excl = 0;
1966		} else {
1967			qgroup->max_excl = limit->max_excl;
1968		}
1969	}
1970	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1971		if (limit->rsv_rfer == CLEAR_VALUE) {
1972			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1973			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1974			qgroup->rsv_rfer = 0;
1975		} else {
1976			qgroup->rsv_rfer = limit->rsv_rfer;
1977		}
1978	}
1979	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1980		if (limit->rsv_excl == CLEAR_VALUE) {
1981			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1982			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1983			qgroup->rsv_excl = 0;
1984		} else {
1985			qgroup->rsv_excl = limit->rsv_excl;
1986		}
1987	}
1988	qgroup->lim_flags |= limit->flags;
1989
1990	spin_unlock(&fs_info->qgroup_lock);
1991
1992	ret = update_qgroup_limit_item(trans, qgroup);
1993	if (ret) {
1994		qgroup_mark_inconsistent(fs_info);
1995		btrfs_info(fs_info, "unable to update quota limit for %llu",
1996		       qgroupid);
1997	}
1998
1999out:
2000	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2001	return ret;
2002}
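
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * in-kernel caller clearing the max referenced limit would set only the
 * matching flag and pass -1 as the value:
 *
 *	struct btrfs_qgroup_limit lim = {
 *		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *		.max_rfer = (u64)-1,	(the CLEAR_VALUE above)
 *	};
 *
 *	ret = btrfs_limit_qgroup(trans, qgroupid, &lim);
 */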
2003
2004/*
2005 * Inform qgroup to trace one dirty extent, its info is recorded in @record.
2006 * So qgroup can account it at transaction committing time.
2007 *
2008	 * No lock version; the caller must acquire the delayed ref lock and
2009	 * allocate memory, then call btrfs_qgroup_trace_extent_post() after exiting the lock context.
2010	 *
2011	 * Return 0 for a successful insert.
2012 * Return >0 for existing record, caller can free @record safely.
2013 * Return <0 for insertion failure, caller can free @record safely.
2014 */
2015int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
2016				     struct btrfs_delayed_ref_root *delayed_refs,
2017				     struct btrfs_qgroup_extent_record *record,
2018				     u64 bytenr)
2019{
2020	struct btrfs_qgroup_extent_record *existing, *ret;
2021	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2022
2023	if (!btrfs_qgroup_full_accounting(fs_info))
2024		return 1;
2025
2026#if BITS_PER_LONG == 32
2027	if (bytenr >= MAX_LFS_FILESIZE) {
2028		btrfs_err_rl(fs_info,
2029"qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
2030			     bytenr);
2031		btrfs_err_32bit_limit(fs_info);
2032		return -EOVERFLOW;
2033	}
2034#endif
2035
2036	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
2037
2038	xa_lock(&delayed_refs->dirty_extents);
2039	existing = xa_load(&delayed_refs->dirty_extents, index);
2040	if (existing) {
2041		if (record->data_rsv && !existing->data_rsv) {
2042			existing->data_rsv = record->data_rsv;
2043			existing->data_rsv_refroot = record->data_rsv_refroot;
2044		}
2045		xa_unlock(&delayed_refs->dirty_extents);
2046		return 1;
2047	}
2048
2049	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2050	xa_unlock(&delayed_refs->dirty_extents);
2051	if (xa_is_err(ret)) {
2052		qgroup_mark_inconsistent(fs_info);
2053		return xa_err(ret);
2054	}
2055
2056	return 0;
2057}
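
/*
 * Illustrative calling pattern (a sketch only; btrfs_qgroup_trace_extent()
 * below is the canonical wrapper that does exactly this):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS);
 *	(within the locked context)
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
 *					       record, bytenr);
 *	(after exiting the locked context)
 *	if (ret == 0)
 *		ret = btrfs_qgroup_trace_extent_post(trans, record, bytenr);
 */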
2058
2059/*
2060 * Post handler after qgroup_trace_extent_nolock().
2061 *
2062 * NOTE: Current qgroup does the expensive backref walk at transaction
2063 * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
2064 * new transactions.
2065 * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2066 * result.
2067 *
2068 * However for old_roots there is no need to do the backref walk at that
2069 * time, since we search commit roots to walk backrefs and the result will
2070 * always be correct.
2071 *
2072 * Due to the nature of the no-lock version, we can't do the backref walk
2073 * there.  So we must call btrfs_qgroup_trace_extent_post() after exiting
2074 * the spinlock context.
2075 *
2076 * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2077 * using current root, then we can move all expensive backref walk out of
2078 * transaction committing, but not now as qgroup accounting will be wrong again.
2079 */
2080int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2081				   struct btrfs_qgroup_extent_record *qrecord,
2082				   u64 bytenr)
2083{
2084	struct btrfs_fs_info *fs_info = trans->fs_info;
2085	struct btrfs_backref_walk_ctx ctx = {
2086		.bytenr = bytenr,
2087		.fs_info = fs_info,
2088	};
2089	int ret;
2090
2091	if (!btrfs_qgroup_full_accounting(fs_info))
2092		return 0;
2093	/*
2094	 * We are always called in a context where we are already holding a
2095	 * transaction handle. Often we are called when adding a data delayed
2096	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2097	 * in which case we will be holding a write lock on extent buffer from a
2098	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2099	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2100	 * that must be acquired before locking any extent buffers.
2101	 *
2102	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2103	 * but we can't pass it a non-NULL transaction handle, because otherwise
2104	 * it would not use commit roots and would lock extent buffers, causing
2105	 * a deadlock if it ends up trying to read lock the same extent buffer
2106	 * that was previously write locked at btrfs_truncate_inode_items().
2107	 *
2108	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2109	 * explicitly tell it to not acquire the commit_root_sem - if we are
2110	 * holding a transaction handle we don't need its protection.
2111	 */
2112	ASSERT(trans != NULL);
2113
2114	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2115		return 0;
2116
2117	ret = btrfs_find_all_roots(&ctx, true);
2118	if (ret < 0) {
2119		qgroup_mark_inconsistent(fs_info);
2120		btrfs_warn(fs_info,
2121"error accounting new delayed refs extent (err code: %d), quota inconsistent",
2122			ret);
2123		return 0;
2124	}
2125
2126	/*
2127	 * Here we don't need to take the lock of
2128	 * trans->transaction->delayed_refs, since the inserted qrecord won't
2129	 * be deleted; only qrecord->node may be modified (by a new qrecord insert).
2130	 *
2131	 * So modifying qrecord->old_roots is safe here.
2132	 */
2133	qrecord->old_roots = ctx.roots;
2134	return 0;
2135}
2136
2137/*
2138 * Inform qgroup to trace one dirty extent, specified by @bytenr and
2139 * @num_bytes.
2140 * So qgroup can account it at transaction commit time.
2141 *
2142 * Better encapsulated version, with memory allocation and backref walk for
2143 * commit roots.
2144 * So this can sleep.
2145 *
2146 * Return 0 if the operation is done.
2147 * Return <0 for error, like memory allocation failure or invalid parameter
2148 * (NULL trans)
2149 */
2150int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2151			      u64 num_bytes)
2152{
2153	struct btrfs_fs_info *fs_info = trans->fs_info;
2154	struct btrfs_qgroup_extent_record *record;
2155	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2156	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2157	int ret;
2158
2159	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2160		return 0;
2161	record = kzalloc(sizeof(*record), GFP_NOFS);
2162	if (!record)
2163		return -ENOMEM;
2164
2165	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2166		kfree(record);
2167		return -ENOMEM;
2168	}
2169
2170	record->num_bytes = num_bytes;
2171
2172	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2173	if (ret) {
2174		/* Clean up if insertion fails or item exists. */
2175		xa_release(&delayed_refs->dirty_extents, index);
2176		kfree(record);
2177		return 0;
2178	}
2179	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2180}
2181
2182/*
2183 * Inform qgroup to trace all file extent items in a leaf
2184 *
2185 * Return 0 for success
2186 * Return <0 for error (ENOMEM)
2187 */
2188int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2189				  struct extent_buffer *eb)
2190{
2191	struct btrfs_fs_info *fs_info = trans->fs_info;
2192	int nr = btrfs_header_nritems(eb);
2193	int i, extent_type, ret;
2194	struct btrfs_key key;
2195	struct btrfs_file_extent_item *fi;
2196	u64 bytenr, num_bytes;
2197
2198	/* We can be called directly from walk_up_proc() */
2199	if (!btrfs_qgroup_full_accounting(fs_info))
2200		return 0;
2201
2202	for (i = 0; i < nr; i++) {
2203		btrfs_item_key_to_cpu(eb, &key, i);
2204
2205		if (key.type != BTRFS_EXTENT_DATA_KEY)
2206			continue;
2207
2208		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2209		/* Filter out non qgroup-accountable extents. */
2210		extent_type = btrfs_file_extent_type(eb, fi);
2211
2212		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2213			continue;
2214
2215		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2216		if (!bytenr)
2217			continue;
2218
2219		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2220
2221		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2222		if (ret)
2223			return ret;
2224	}
2225	cond_resched();
2226	return 0;
2227}
2228
2229/*
2230 * Walk up the tree from the bottom, freeing leaves and any interior
2231 * nodes which have had all slots visited. If a node (leaf or
2232 * interior) is freed, the node above it will have its slot
2233 * incremented. The root node will never be freed.
2234 *
2235 * At the end of this function, we should have a path which has all
2236 * slots incremented to the next position for a search. If we need to
2237 * read a new node it will be NULL and the node above it will have the
2238 * correct slot selected for a later read.
2239 *
2240 * If we increment the root node's slot counter past the number of
2241 * elements, 1 is returned to signal completion of the search.
2242 */
2243static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2244{
2245	int level = 0;
2246	int nr, slot;
2247	struct extent_buffer *eb;
2248
2249	if (root_level == 0)
2250		return 1;
2251
2252	while (level <= root_level) {
2253		eb = path->nodes[level];
2254		nr = btrfs_header_nritems(eb);
2255		path->slots[level]++;
2256		slot = path->slots[level];
2257		if (slot >= nr || level == 0) {
2258			/*
2259			 * Don't free the root - we will detect this
2260			 * condition after our loop and return a
2261			 * positive value for the caller to stop walking the tree.
2262			 */
2263			if (level != root_level) {
2264				btrfs_tree_unlock_rw(eb, path->locks[level]);
2265				path->locks[level] = 0;
2266
2267				free_extent_buffer(eb);
2268				path->nodes[level] = NULL;
2269				path->slots[level] = 0;
2270			}
2271		} else {
2272			/*
2273			 * We have a valid slot to walk back down
2274			 * from. Stop here so caller can process these
2275			 * new nodes.
2276			 */
2277			break;
2278		}
2279
2280		level++;
2281	}
2282
2283	eb = path->nodes[root_level];
2284	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2285		return 1;
2286
2287	return 0;
2288}
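
/*
 * Worked example (illustrative): with root_level == 1, once a leaf's slots
 * are exhausted the leaf is unlocked and freed, path->slots[0] resets to 0
 * and path->slots[1] advances by one.  If path->slots[1] is still below
 * btrfs_header_nritems() of the root, 0 is returned and the caller walks
 * back down into the next child; otherwise 1 is returned and the search is
 * complete.
 */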
2289
2290/*
2291 * Helper function to trace a subtree tree block swap.
2292 *
2293 * The swap will happen at the highest tree block, but there may be a lot of
2294 * tree blocks involved.
2295 *
2296 * For example:
2297 *  OO = Old tree blocks
2298 *  NN = New tree blocks allocated during balance
2299 *
2300 *           File tree (257)                  Reloc tree for 257
2301 * L2              OO                                NN
2302 *               /    \                            /    \
2303 * L1          OO      OO (a)                    OO      NN (a)
2304 *            / \     / \                       / \     / \
2305 * L0       OO   OO OO   OO                   OO   OO NN   NN
2306 *                  (b)  (c)                          (b)  (c)
2307 *
2308 * When calling qgroup_trace_extent_swap(), we will pass:
2309 * @src_eb = OO(a)
2310 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2311 * @dst_level = 0
2312 * @root_level = 1
2313 *
2314 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2315 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2316 *
2317 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2318 *
2319 * 1) Tree search from @src_eb
2320 *    It should act as a simplified btrfs_search_slot().
2321 *    The key for search can be extracted from @dst_path->nodes[dst_level]
2322 *    (first key).
2323 *
2324 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2325 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2326 *    They should be marked during previous (@dst_level = 1) iteration.
2327 *
2328 * 3) Mark file extents in leaves dirty
2329 *    We don't have a good way to pick out new file extents only.
2330 *    So we still follow the old method by scanning all file extents in
2331 *    the leaf.
2332 *
2333 * This function can free us from keeping two paths, thus later we only need
2334 * to care about how to iterate all new tree blocks in reloc tree.
2335 */
2336static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2337				    struct extent_buffer *src_eb,
2338				    struct btrfs_path *dst_path,
2339				    int dst_level, int root_level,
2340				    bool trace_leaf)
2341{
2342	struct btrfs_key key;
2343	struct btrfs_path *src_path;
2344	struct btrfs_fs_info *fs_info = trans->fs_info;
2345	u32 nodesize = fs_info->nodesize;
2346	int cur_level = root_level;
2347	int ret;
2348
2349	BUG_ON(dst_level > root_level);
2350	/* Level mismatch */
2351	if (btrfs_header_level(src_eb) != root_level)
2352		return -EINVAL;
2353
2354	src_path = btrfs_alloc_path();
2355	if (!src_path) {
2356		ret = -ENOMEM;
2357		goto out;
2358	}
2359
2360	if (dst_level)
2361		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2362	else
2363		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2364
2365	/* For src_path */
2366	atomic_inc(&src_eb->refs);
2367	src_path->nodes[root_level] = src_eb;
2368	src_path->slots[root_level] = dst_path->slots[root_level];
2369	src_path->locks[root_level] = 0;
2370
2371	/* A simplified version of btrfs_search_slot() */
2372	while (cur_level >= dst_level) {
2373		struct btrfs_key src_key;
2374		struct btrfs_key dst_key;
2375
2376		if (src_path->nodes[cur_level] == NULL) {
2377			struct extent_buffer *eb;
2378			int parent_slot;
2379
2380			eb = src_path->nodes[cur_level + 1];
2381			parent_slot = src_path->slots[cur_level + 1];
2382
2383			eb = btrfs_read_node_slot(eb, parent_slot);
2384			if (IS_ERR(eb)) {
2385				ret = PTR_ERR(eb);
2386				goto out;
2387			}
2388
2389			src_path->nodes[cur_level] = eb;
2390
2391			btrfs_tree_read_lock(eb);
2392			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2393		}
2394
2395		src_path->slots[cur_level] = dst_path->slots[cur_level];
2396		if (cur_level) {
2397			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2398					&dst_key, dst_path->slots[cur_level]);
2399			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2400					&src_key, src_path->slots[cur_level]);
2401		} else {
2402			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2403					&dst_key, dst_path->slots[cur_level]);
2404			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2405					&src_key, src_path->slots[cur_level]);
2406		}
2407		/* Content mismatch, something went wrong */
2408		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2409			ret = -ENOENT;
2410			goto out;
2411		}
2412		cur_level--;
2413	}
2414
2415	/*
2416	 * Now both @dst_path and @src_path have been populated, record the tree
2417	 * blocks for qgroup accounting.
2418	 */
2419	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2420					nodesize);
2421	if (ret < 0)
2422		goto out;
2423	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2424					nodesize);
2425	if (ret < 0)
2426		goto out;
2427
2428	/* Record leaf file extents */
2429	if (dst_level == 0 && trace_leaf) {
2430		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2431		if (ret < 0)
2432			goto out;
2433		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2434	}
2435out:
2436	btrfs_free_path(src_path);
2437	return ret;
2438}
2439
2440/*
2441 * Helper function to do recursive generation-aware depth-first search, to
2442 * locate all new tree blocks in a subtree of reloc tree.
2443 *
2444 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2445 *         reloc tree
2446 * L2         NN (a)
2447 *          /    \
2448 * L1    OO        NN (b)
2449 *      /  \      /  \
2450 * L0  OO  OO    OO  NN
2451 *               (c) (d)
2452 * If we pass:
2453 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2454 * @cur_level = 1
2455 * @root_level = 1
2456 *
2457 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
2458 * the above tree blocks along with their counterparts in the file tree.
2459 * During the search, old tree blocks OO(c) will be skipped as tree block swap
2460 * won't affect OO(c).
2461 */
2462static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2463					   struct extent_buffer *src_eb,
2464					   struct btrfs_path *dst_path,
2465					   int cur_level, int root_level,
2466					   u64 last_snapshot, bool trace_leaf)
2467{
2468	struct btrfs_fs_info *fs_info = trans->fs_info;
2469	struct extent_buffer *eb;
2470	bool need_cleanup = false;
2471	int ret = 0;
2472	int i;
2473
2474	/* Level sanity check */
2475	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2476	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2477	    root_level < cur_level) {
2478		btrfs_err_rl(fs_info,
2479			"%s: bad levels, cur_level=%d root_level=%d",
2480			__func__, cur_level, root_level);
2481		return -EUCLEAN;
2482	}
2483
2484	/* Read the tree block if needed */
2485	if (dst_path->nodes[cur_level] == NULL) {
2486		int parent_slot;
2487		u64 child_gen;
2488
2489		/*
2490		 * dst_path->nodes[root_level] must be initialized before
2491		 * calling this function.
2492		 */
2493		if (cur_level == root_level) {
2494			btrfs_err_rl(fs_info,
2495	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2496				__func__, root_level, root_level, cur_level);
2497			return -EUCLEAN;
2498		}
2499
2500		/*
2501		 * We need to get child blockptr/gen from parent before we can
2502		 * read it.
2503		 */
2504		eb = dst_path->nodes[cur_level + 1];
2505		parent_slot = dst_path->slots[cur_level + 1];
2506		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2507
2508		/* This node is old, no need to trace */
2509		if (child_gen < last_snapshot)
2510			goto out;
2511
2512		eb = btrfs_read_node_slot(eb, parent_slot);
2513		if (IS_ERR(eb)) {
2514			ret = PTR_ERR(eb);
2515			goto out;
2516		}
2517
2518		dst_path->nodes[cur_level] = eb;
2519		dst_path->slots[cur_level] = 0;
2520
2521		btrfs_tree_read_lock(eb);
2522		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2523		need_cleanup = true;
2524	}
2525
2526	/* Now record this tree block and its counter part for qgroups */
2527	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2528				       root_level, trace_leaf);
2529	if (ret < 0)
2530		goto cleanup;
2531
2532	eb = dst_path->nodes[cur_level];
2533
2534	if (cur_level > 0) {
2535		/* Iterate all child tree blocks */
2536		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2537			/* Skip old tree blocks as they won't be swapped */
2538			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2539				continue;
2540			dst_path->slots[cur_level] = i;
2541
2542			/* Recursive call (at most 7 times) */
2543			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2544					dst_path, cur_level - 1, root_level,
2545					last_snapshot, trace_leaf);
2546			if (ret < 0)
2547				goto cleanup;
2548		}
2549	}
2550
2551cleanup:
2552	if (need_cleanup) {
2553		/* Clean up */
2554		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2555				     dst_path->locks[cur_level]);
2556		free_extent_buffer(dst_path->nodes[cur_level]);
2557		dst_path->nodes[cur_level] = NULL;
2558		dst_path->slots[cur_level] = 0;
2559		dst_path->locks[cur_level] = 0;
2560	}
2561out:
2562	return ret;
2563}
2564
2565static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2566				struct extent_buffer *src_eb,
2567				struct extent_buffer *dst_eb,
2568				u64 last_snapshot, bool trace_leaf)
2569{
2570	struct btrfs_fs_info *fs_info = trans->fs_info;
2571	struct btrfs_path *dst_path = NULL;
2572	int level;
2573	int ret;
2574
2575	if (!btrfs_qgroup_full_accounting(fs_info))
2576		return 0;
2577
2578	/* Wrong parameter order */
2579	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2580		btrfs_err_rl(fs_info,
2581		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2582			     btrfs_header_generation(src_eb),
2583			     btrfs_header_generation(dst_eb));
2584		return -EUCLEAN;
2585	}
2586
2587	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2588		ret = -EIO;
2589		goto out;
2590	}
2591
2592	level = btrfs_header_level(dst_eb);
2593	dst_path = btrfs_alloc_path();
2594	if (!dst_path) {
2595		ret = -ENOMEM;
2596		goto out;
2597	}
2598	/* For dst_path */
2599	atomic_inc(&dst_eb->refs);
2600	dst_path->nodes[level] = dst_eb;
2601	dst_path->slots[level] = 0;
2602	dst_path->locks[level] = 0;
2603
2604	/* Do the generation-aware depth-first search */
2605	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2606					      level, last_snapshot, trace_leaf);
2607	if (ret < 0)
2608		goto out;
2609	ret = 0;
2610
2611out:
2612	btrfs_free_path(dst_path);
2613	if (ret < 0)
2614		qgroup_mark_inconsistent(fs_info);
2615	return ret;
2616}
2617
2618/*
2619 * Inform qgroup to trace a whole subtree, including all its child tree
2620 * blocks and data.
2621 * The root tree block is specified by @root_eb.
2622 *
2623 * Normally used by relocation(tree block swap) and subvolume deletion.
2624 *
2625 * Return 0 for success
2626 * Return <0 for error(ENOMEM or tree search error)
2627 */
2628int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2629			       struct extent_buffer *root_eb,
2630			       u64 root_gen, int root_level)
2631{
2632	struct btrfs_fs_info *fs_info = trans->fs_info;
2633	int ret = 0;
2634	int level;
2635	u8 drop_subptree_thres;
2636	struct extent_buffer *eb = root_eb;
2637	struct btrfs_path *path = NULL;
2638
2639	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2640	ASSERT(root_eb != NULL);
2641
2642	if (!btrfs_qgroup_full_accounting(fs_info))
2643		return 0;
2644
2645	spin_lock(&fs_info->qgroup_lock);
2646	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2647	spin_unlock(&fs_info->qgroup_lock);
2648
2649	/*
2650	 * This function only gets called for snapshot drop. If we hit a high
2651	 * level node here, it means we are going to change ownership for quite
2652	 * a lot of extents, which will greatly slow down btrfs_commit_transaction().
2653	 *
2654	 * So if we find a high level tree node here, we just skip the accounting
2655	 * and mark the qgroup inconsistent.
2656	 */
2657	if (root_level >= drop_subptree_thres) {
2658		qgroup_mark_inconsistent(fs_info);
2659		return 0;
2660	}
2661
2662	if (!extent_buffer_uptodate(root_eb)) {
2663		struct btrfs_tree_parent_check check = {
2664			.transid = root_gen,
2665			.level = root_level
2666		};
2667
2668		ret = btrfs_read_extent_buffer(root_eb, &check);
2669		if (ret)
2670			goto out;
2671	}
2672
2673	if (root_level == 0) {
2674		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2675		goto out;
2676	}
2677
2678	path = btrfs_alloc_path();
2679	if (!path)
2680		return -ENOMEM;
2681
2682	/*
2683	 * Walk down the tree.  Missing extent blocks are filled in as
2684	 * we go. Metadata is accounted every time we read a new
2685	 * extent block.
2686	 *
2687	 * When we reach a leaf, we account for file extent items in it,
2688	 * walk back up the tree (adjusting slot pointers as we go)
2689	 * and restart the search process.
2690	 */
2691	atomic_inc(&root_eb->refs);	/* For path */
2692	path->nodes[root_level] = root_eb;
2693	path->slots[root_level] = 0;
2694	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2695walk_down:
2696	level = root_level;
2697	while (level >= 0) {
2698		if (path->nodes[level] == NULL) {
2699			int parent_slot;
2700			u64 child_bytenr;
2701
2702			/*
2703			 * We need to get child blockptr from parent before we
2704			 * can read it.
2705			 */
2706			eb = path->nodes[level + 1];
2707			parent_slot = path->slots[level + 1];
2708			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2709
2710			eb = btrfs_read_node_slot(eb, parent_slot);
2711			if (IS_ERR(eb)) {
2712				ret = PTR_ERR(eb);
2713				goto out;
2714			}
2715
2716			path->nodes[level] = eb;
2717			path->slots[level] = 0;
2718
2719			btrfs_tree_read_lock(eb);
2720			path->locks[level] = BTRFS_READ_LOCK;
2721
2722			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2723							fs_info->nodesize);
2724			if (ret)
2725				goto out;
2726		}
2727
2728		if (level == 0) {
2729			ret = btrfs_qgroup_trace_leaf_items(trans,
2730							    path->nodes[level]);
2731			if (ret)
2732				goto out;
2733
2734			/* Nonzero return here means we completed our search */
2735			ret = adjust_slots_upwards(path, root_level);
2736			if (ret)
2737				break;
2738
2739			/* Restart search with new slots */
2740			goto walk_down;
2741		}
2742
2743		level--;
2744	}
2745
2746	ret = 0;
2747out:
2748	btrfs_free_path(path);
2749
2750	return ret;
2751}
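
/*
 * Illustrative usage sketch (assumed caller, e.g. while dropping a
 * snapshot subtree):
 *
 *	ret = btrfs_qgroup_trace_subtree(trans, eb,
 *					 btrfs_header_generation(eb),
 *					 btrfs_header_level(eb));
 */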
2752
2753static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2754{
2755	if (!list_empty(&qgroup->nested_iterator))
2756		return;
2757
2758	list_add_tail(&qgroup->nested_iterator, head);
2759}
2760
2761static void qgroup_iterator_nested_clean(struct list_head *head)
2762{
2763	while (!list_empty(head)) {
2764		struct btrfs_qgroup *qgroup;
2765
2766		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2767		list_del_init(&qgroup->nested_iterator);
2768	}
2769}
2770
2771#define UPDATE_NEW	0
2772#define UPDATE_OLD	1
2773/*
2774 * Walk all of the roots that point to the bytenr and adjust their refcnts.
2775 */
2776static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2777				 struct ulist *roots, struct list_head *qgroups,
2778				 u64 seq, int update_old)
2779{
2780	struct ulist_node *unode;
2781	struct ulist_iterator uiter;
2782	struct btrfs_qgroup *qg;
2783
2784	if (!roots)
2785		return;
2786	ULIST_ITER_INIT(&uiter);
2787	while ((unode = ulist_next(roots, &uiter))) {
2788		LIST_HEAD(tmp);
2789
2790		qg = find_qgroup_rb(fs_info, unode->val);
2791		if (!qg)
2792			continue;
2793
2794		qgroup_iterator_nested_add(qgroups, qg);
2795		qgroup_iterator_add(&tmp, qg);
2796		list_for_each_entry(qg, &tmp, iterator) {
2797			struct btrfs_qgroup_list *glist;
2798
2799			if (update_old)
2800				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2801			else
2802				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2803
2804			list_for_each_entry(glist, &qg->groups, next_group) {
2805				qgroup_iterator_nested_add(qgroups, glist->group);
2806				qgroup_iterator_add(&tmp, glist->group);
2807			}
2808		}
2809		qgroup_iterator_clean(&tmp);
2810	}
2811}
2812
2813/*
2814 * Update qgroup rfer/excl counters.
2815 * Rfer update is easy, the code can explain itself.
2816 *
2817 * Excl update is tricky, the update is split into 2 parts.
2818 * Part 1: Possible exclusive <-> sharing detect:
2819 *	|	A	|	!A	|
2820 *  -------------------------------------
2821 *  B	|	*	|	-	|
2822 *  -------------------------------------
2823 *  !B	|	+	|	**	|
2824 *  -------------------------------------
2825 *
2826 * Conditions:
2827 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2828 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2829 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2830 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2831 *
2832 * Results:
2833 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2834 * *: Definitely not changed.		**: Possible unchanged.
2835 *
2836 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2837 *
2838 * To make the logic clear, we first use conditions A and B to split the
2839 * combinations into 4 results.
2840 *
2841 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2842 * them only one variant may be 0.
2843 *
2844 * Lastly, check result **: since there are 2 variants that may be 0, split
2845 * them again (2x2).
2846 * But this time we don't need to consider other things, the code and logic
2847 * are easy to understand now.
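 *
 * Worked example (illustrative, with assumed numbers): num_bytes = 16K,
 * nr_old_roots = 2, nr_new_roots = 1.  A qgroup seeing cur_old_count = 1
 * and cur_new_count = 1 matches A && !B (shared -> exclusive) and gains
 * 16K of excl, while one seeing cur_old_count = 2 and cur_new_count = 0
 * matches !A && B (exclusive -> dropped) and loses 16K of both rfer and
 * excl.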
2848 */
2849static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2850				   struct list_head *qgroups, u64 nr_old_roots,
2851				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2852{
2853	struct btrfs_qgroup *qg;
2854
2855	list_for_each_entry(qg, qgroups, nested_iterator) {
2856		u64 cur_new_count, cur_old_count;
2857		bool dirty = false;
2858
2859		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2860		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2861
2862		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2863					     cur_new_count);
2864
2865		/* Rfer update part */
2866		if (cur_old_count == 0 && cur_new_count > 0) {
2867			qg->rfer += num_bytes;
2868			qg->rfer_cmpr += num_bytes;
2869			dirty = true;
2870		}
2871		if (cur_old_count > 0 && cur_new_count == 0) {
2872			qg->rfer -= num_bytes;
2873			qg->rfer_cmpr -= num_bytes;
2874			dirty = true;
2875		}
2876
2877		/* Excl update part */
2878		/* Exclusive/none -> shared case */
2879		if (cur_old_count == nr_old_roots &&
2880		    cur_new_count < nr_new_roots) {
2881			/* Exclusive -> shared */
2882			if (cur_old_count != 0) {
2883				qg->excl -= num_bytes;
2884				qg->excl_cmpr -= num_bytes;
2885				dirty = true;
2886			}
2887		}
2888
2889		/* Shared -> exclusive/none case */
2890		if (cur_old_count < nr_old_roots &&
2891		    cur_new_count == nr_new_roots) {
2892			/* Shared->exclusive */
2893			if (cur_new_count != 0) {
2894				qg->excl += num_bytes;
2895				qg->excl_cmpr += num_bytes;
2896				dirty = true;
2897			}
2898		}
2899
2900		/* Exclusive/none -> exclusive/none case */
2901		if (cur_old_count == nr_old_roots &&
2902		    cur_new_count == nr_new_roots) {
2903			if (cur_old_count == 0) {
2904				/* None -> exclusive/none */
2905
2906				if (cur_new_count != 0) {
2907					/* None -> exclusive */
2908					qg->excl += num_bytes;
2909					qg->excl_cmpr += num_bytes;
2910					dirty = true;
2911				}
2912				/* None -> none, nothing changed */
2913			} else {
2914				/* Exclusive -> exclusive/none */
2915
2916				if (cur_new_count == 0) {
2917					/* Exclusive -> none */
2918					qg->excl -= num_bytes;
2919					qg->excl_cmpr -= num_bytes;
2920					dirty = true;
2921				}
2922				/* Exclusive -> exclusive, nothing changed */
2923			}
2924		}
2925
2926		if (dirty)
2927			qgroup_dirty(fs_info, qg);
2928	}
2929}
2930
2931/*
2932 * Check if the @roots could potentially be a list of fs tree roots
2933 *
2934 * Return 0 for definitely not a fs/subvol tree roots ulist
2935 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2936 *          one as well)
2937 */
2938static int maybe_fs_roots(struct ulist *roots)
2939{
2940	struct ulist_node *unode;
2941	struct ulist_iterator uiter;
2942
2943	/* Empty one, still possible for fs roots */
2944	if (!roots || roots->nnodes == 0)
2945		return 1;
2946
2947	ULIST_ITER_INIT(&uiter);
2948	unode = ulist_next(roots, &uiter);
2949	if (!unode)
2950		return 1;
2951
2952	/*
2953	 * If it contains fs tree roots, then it must belong to fs/subvol
2954	 * trees.
2955	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2956	 */
2957	return is_fstree(unode->val);
2958}
2959
2960int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2961				u64 num_bytes, struct ulist *old_roots,
2962				struct ulist *new_roots)
2963{
2964	struct btrfs_fs_info *fs_info = trans->fs_info;
2965	LIST_HEAD(qgroups);
2966	u64 seq;
2967	u64 nr_new_roots = 0;
2968	u64 nr_old_roots = 0;
2969	int ret = 0;
2970
2971	/*
2972	 * If quotas get disabled meanwhile, the resources need to be freed and
2973	 * we can't just exit here.
2974	 */
2975	if (!btrfs_qgroup_full_accounting(fs_info) ||
2976	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2977		goto out_free;
2978
2979	if (new_roots) {
2980		if (!maybe_fs_roots(new_roots))
2981			goto out_free;
2982		nr_new_roots = new_roots->nnodes;
2983	}
2984	if (old_roots) {
2985		if (!maybe_fs_roots(old_roots))
2986			goto out_free;
2987		nr_old_roots = old_roots->nnodes;
2988	}
2989
2990	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2991	if (nr_old_roots == 0 && nr_new_roots == 0)
2992		goto out_free;
2993
2994	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2995					num_bytes, nr_old_roots, nr_new_roots);
2996
2997	mutex_lock(&fs_info->qgroup_rescan_lock);
2998	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2999		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
3000			mutex_unlock(&fs_info->qgroup_rescan_lock);
3001			ret = 0;
3002			goto out_free;
3003		}
3004	}
3005	mutex_unlock(&fs_info->qgroup_rescan_lock);
3006
3007	spin_lock(&fs_info->qgroup_lock);
3008	seq = fs_info->qgroup_seq;
3009
3010	/* Update old refcnts using old_roots */
3011	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
3012
3013	/* Update new refcnts using new_roots */
3014	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
3015
3016	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
3017			       num_bytes, seq);
3018
3019	/*
3020	 * We're done using the iterator, release all its qgroups while holding
3021	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
3022	 * and trigger use-after-free accesses to qgroups.
3023	 */
3024	qgroup_iterator_nested_clean(&qgroups);
3025
3026	/*
3027	 * Bump qgroup_seq to avoid seq overlap
3028	 */
3029	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
3030	spin_unlock(&fs_info->qgroup_lock);
3031out_free:
3032	ulist_free(old_roots);
3033	ulist_free(new_roots);
3034	return ret;
3035}
3036
3037int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
3038{
3039	struct btrfs_fs_info *fs_info = trans->fs_info;
3040	struct btrfs_qgroup_extent_record *record;
3041	struct btrfs_delayed_ref_root *delayed_refs;
3042	struct ulist *new_roots = NULL;
3043	unsigned long index;
3044	u64 num_dirty_extents = 0;
3045	u64 qgroup_to_skip;
3046	int ret = 0;
3047
3048	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3049		return 0;
3050
3051	delayed_refs = &trans->transaction->delayed_refs;
3052	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3053	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3054		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
3055
3056		num_dirty_extents++;
3057		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
3058
3059		if (!ret && !(fs_info->qgroup_flags &
3060			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3061			struct btrfs_backref_walk_ctx ctx = { 0 };
3062
3063			ctx.bytenr = bytenr;
3064			ctx.fs_info = fs_info;
3065
3066			/*
3067			 * Old roots should be searched when inserting the
3068			 * qgroup extent record.
3069			 *
3070			 * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan
3071			 * case, we may have some records inserted during
3072			 * NO_ACCOUNTING (thus no old_roots populated), but
3073			 * later we start a rescan, which clears NO_ACCOUNTING,
3074			 * leaving some inserted records without old_roots
3075			 * populated.
3076			 *
3077			 * Those cases are rare and should not cause too much
3078			 * time spent during commit_transaction().
3079			 */
3080			if (!record->old_roots) {
3081				/* Search commit root to find old_roots */
3082				ret = btrfs_find_all_roots(&ctx, false);
3083				if (ret < 0)
3084					goto cleanup;
3085				record->old_roots = ctx.roots;
3086				ctx.roots = NULL;
3087			}
3088
3089			/*
3090			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
3091			 * which doesn't lock tree or delayed_refs and search
3092			 * current root. It's safe inside commit_transaction().
3093			 */
3094			ctx.trans = trans;
3095			ctx.time_seq = BTRFS_SEQ_LAST;
3096			ret = btrfs_find_all_roots(&ctx, false);
3097			if (ret < 0)
3098				goto cleanup;
3099			new_roots = ctx.roots;
3100			if (qgroup_to_skip) {
3101				ulist_del(new_roots, qgroup_to_skip, 0);
3102				ulist_del(record->old_roots, qgroup_to_skip,
3103					  0);
3104			}
3105			ret = btrfs_qgroup_account_extent(trans, bytenr,
3106							  record->num_bytes,
3107							  record->old_roots,
3108							  new_roots);
3109			record->old_roots = NULL;
3110			new_roots = NULL;
3111		}
3112		/* Free the reserved data space */
3113		btrfs_qgroup_free_refroot(fs_info,
3114				record->data_rsv_refroot,
3115				record->data_rsv,
3116				BTRFS_QGROUP_RSV_DATA);
3117cleanup:
3118		ulist_free(record->old_roots);
3119		ulist_free(new_roots);
3120		new_roots = NULL;
3121		xa_erase(&delayed_refs->dirty_extents, index);
3122		kfree(record);
3123
3124	}
3125	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
3126				       num_dirty_extents);
3127	return ret;
3128}
3129
3130/*
3131 * Writes all changed qgroups to disk.
3132 * Called by the transaction commit path and the qgroup assign ioctl.
3133 */
3134int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3135{
3136	struct btrfs_fs_info *fs_info = trans->fs_info;
3137	int ret = 0;
3138
3139	/*
3140	 * In case we are called from the qgroup assign ioctl, assert that we
3141	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3142	 * disable operation (ioctl) and access a freed quota root.
3143	 */
3144	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3145		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3146
3147	if (!fs_info->quota_root)
3148		return ret;
3149
3150	spin_lock(&fs_info->qgroup_lock);
3151	while (!list_empty(&fs_info->dirty_qgroups)) {
3152		struct btrfs_qgroup *qgroup;
3153		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3154					  struct btrfs_qgroup, dirty);
3155		list_del_init(&qgroup->dirty);
3156		spin_unlock(&fs_info->qgroup_lock);
3157		ret = update_qgroup_info_item(trans, qgroup);
3158		if (ret)
3159			qgroup_mark_inconsistent(fs_info);
3160		ret = update_qgroup_limit_item(trans, qgroup);
3161		if (ret)
3162			qgroup_mark_inconsistent(fs_info);
3163		spin_lock(&fs_info->qgroup_lock);
3164	}
3165	if (btrfs_qgroup_enabled(fs_info))
3166		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3167	else
3168		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3169	spin_unlock(&fs_info->qgroup_lock);
3170
3171	ret = update_qgroup_status_item(trans);
3172	if (ret)
3173		qgroup_mark_inconsistent(fs_info);
3174
3175	return ret;
3176}
3177
3178int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3179			       struct btrfs_qgroup_inherit *inherit,
3180			       size_t size)
3181{
3182	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3183		return -EOPNOTSUPP;
3184	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3185		return -EINVAL;
3186
3187	/*
3188	 * In the past we allowed btrfs_qgroup_inherit to specify copying the
3189	 * rfer/excl numbers directly from other qgroups.  This behavior has
3190	 * been disabled in userspace for a very long time, but here we should
3191	 * also disable it in the kernel, as this behavior is known to mark qgroups
3192	 * inconsistent, and a rescan would wipe out the changes anyway.
3193	 *
3194	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3195	 */
3196	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3197		return -EINVAL;
3198
3199	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3200		return -EINVAL;
3201
3202	/*
3203	 * Skip the inherit source qgroups check if qgroups are not enabled.
3204	 * Qgroups can still be enabled later, causing problems, but in that case
3205	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3206	 */
3207	if (!btrfs_qgroup_enabled(fs_info))
3208		return 0;
3209
3210	/*
3211	 * Now check all the remaining qgroups, they should all:
3212	 *
3213	 * - Exist
3214	 * - Be higher level qgroups.
3215	 */
3216	for (int i = 0; i < inherit->num_qgroups; i++) {
3217		struct btrfs_qgroup *qgroup;
3218		u64 qgroupid = inherit->qgroups[i];
3219
3220		if (btrfs_qgroup_level(qgroupid) == 0)
3221			return -EINVAL;
3222
3223		spin_lock(&fs_info->qgroup_lock);
3224		qgroup = find_qgroup_rb(fs_info, qgroupid);
3225		if (!qgroup) {
3226			spin_unlock(&fs_info->qgroup_lock);
3227			return -ENOENT;
3228		}
3229		spin_unlock(&fs_info->qgroup_lock);
3230	}
3231	return 0;
3232}
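
/*
 * Illustrative sketch (assumed example values): the struct_size() check
 * above expects exactly num_qgroups trailing ids, e.g. for a single
 * higher level qgroup 1/100:
 *
 *	size_t size = struct_size(inherit, qgroups, 1);
 *
 *	inherit = kzalloc(size, GFP_KERNEL);
 *	inherit->num_qgroups = 1;
 *	inherit->qgroups[0] = (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 100;
 */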
3233
3234static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3235			       u64 inode_rootid,
3236			       struct btrfs_qgroup_inherit **inherit)
3237{
3238	int i = 0;
3239	u64 num_qgroups = 0;
3240	struct btrfs_qgroup *inode_qg;
3241	struct btrfs_qgroup_list *qg_list;
3242	struct btrfs_qgroup_inherit *res;
3243	size_t struct_sz;
3244	u64 *qgids;
3245
3246	if (*inherit)
3247		return -EEXIST;
3248
3249	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3250	if (!inode_qg)
3251		return -ENOENT;
3252
3253	num_qgroups = list_count_nodes(&inode_qg->groups);
3254
3255	if (!num_qgroups)
3256		return 0;
3257
3258	struct_sz = struct_size(res, qgroups, num_qgroups);
3259	if (struct_sz == SIZE_MAX)
3260		return -ERANGE;
3261
3262	res = kzalloc(struct_sz, GFP_NOFS);
3263	if (!res)
3264		return -ENOMEM;
3265	res->num_qgroups = num_qgroups;
3266	qgids = res->qgroups;
3267
3268	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3269		qgids[i++] = qg_list->group->qgroupid;
3270
3271	*inherit = res;
3272	return 0;
3273}
3274
3275/*
3276 * Check if we can skip a rescan when inheriting qgroups.  If @src has a single
3277 * @parent, and that @parent owns all its bytes exclusively, we can skip
3278 * the full rescan by just adding nodesize to the @parent's excl/rfer.
3279 *
3280 * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3281 * Return 0 if a quick inherit is done.
3282 * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3283 */
3284static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3285					 u64 srcid, u64 parentid)
3286{
3287	struct btrfs_qgroup *src;
3288	struct btrfs_qgroup *parent;
3289	struct btrfs_qgroup_list *list;
3290	int nr_parents = 0;
3291
3292	src = find_qgroup_rb(fs_info, srcid);
3293	if (!src)
3294		return -ENOENT;
3295	parent = find_qgroup_rb(fs_info, parentid);
3296	if (!parent)
3297		return -ENOENT;
3298
3299	/*
3300	 * Source has no parent qgroup, but our new qgroup would have one.
3301	 * Qgroup numbers would become inconsistent.
3302	 */
3303	if (list_empty(&src->groups))
3304		return 1;
3305
3306	list_for_each_entry(list, &src->groups, next_group) {
3307		/* The parent is not the same, quick update is not possible. */
3308		if (list->group->qgroupid != parentid)
3309			return 1;
3310		nr_parents++;
3311		/*
3312		 * More than one parent qgroup, we can't be sure about accounting
3313		 * consistency.
3314		 */
3315		if (nr_parents > 1)
3316			return 1;
3317	}
3318
3319	/*
3320	 * The parent is not exclusively owning all its bytes.  We're not sure
3321	 * if the source has any bytes not fully owned by the parent.
3322	 */
3323	if (parent->excl != parent->rfer)
3324		return 1;
3325
3326	parent->excl += fs_info->nodesize;
3327	parent->rfer += fs_info->nodesize;
3328	return 0;
3329}
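
/*
 * Worked example (illustrative): if the snapshot source subvolume is
 * assigned only to qgroup 1/100, and 1/100 has excl == rfer (it owns all
 * of its bytes exclusively), then the snapshot only adds one new root
 * node, so bumping 1/100's rfer and excl by nodesize keeps the numbers
 * consistent without a full rescan.
 */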
3330
3331/*
3332 * Copy the accounting information between qgroups. This is necessary
3333 * when a snapshot or a subvolume is created. Throwing an error will
3334 * cause a transaction abort so we take extra care here to only error
3335 * when a readonly fs is a reasonable outcome.
3336 */
3337int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3338			 u64 objectid, u64 inode_rootid,
3339			 struct btrfs_qgroup_inherit *inherit)
3340{
3341	int ret = 0;
3342	u64 *i_qgroups;
3343	bool committing = false;
3344	struct btrfs_fs_info *fs_info = trans->fs_info;
3345	struct btrfs_root *quota_root;
3346	struct btrfs_qgroup *srcgroup;
3347	struct btrfs_qgroup *dstgroup;
3348	struct btrfs_qgroup *prealloc;
3349	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3350	bool free_inherit = false;
3351	bool need_rescan = false;
3352	u32 level_size = 0;
3353	u64 nums;
3354
3355	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3356	if (!prealloc)
3357		return -ENOMEM;
3358
3359	/*
3360	 * There are only two callers of this function.
3361	 *
3362	 * One is in create_subvol(), in the ioctl context, which needs to hold
3363	 * the qgroup_ioctl_lock.
3364	 *
3365	 * The other one is in create_pending_snapshot(), where no other qgroup
3366	 * code can modify the fs as they all need to either start a new trans
3367	 * or hold a trans handle, thus we don't need to hold the
3368	 * qgroup_ioctl_lock.
3369	 * This avoids a long and complex lock chain and makes lockdep happy.
3370	 */
3371	spin_lock(&fs_info->trans_lock);
3372	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3373		committing = true;
3374	spin_unlock(&fs_info->trans_lock);
3375
3376	if (!committing)
3377		mutex_lock(&fs_info->qgroup_ioctl_lock);
3378	if (!btrfs_qgroup_enabled(fs_info))
3379		goto out;
3380
3381	quota_root = fs_info->quota_root;
3382	if (!quota_root) {
3383		ret = -EINVAL;
3384		goto out;
3385	}
3386
3387	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3388		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3389		if (ret)
3390			goto out;
3391		free_inherit = true;
3392	}
3393
3394	if (inherit) {
3395		i_qgroups = (u64 *)(inherit + 1);
3396		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3397		       2 * inherit->num_excl_copies;
3398		for (int i = 0; i < nums; i++) {
3399			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3400
3401			/*
3402			 * Zero out invalid groups so we can ignore
3403			 * them later.
3404			 */
3405			if (!srcgroup ||
3406			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3407				*i_qgroups = 0ULL;
3408
3409			++i_qgroups;
3410		}
3411	}
3412
3413	/*
3414	 * create a tracking group for the subvol itself
3415	 */
3416	ret = add_qgroup_item(trans, quota_root, objectid);
3417	if (ret)
3418		goto out;
3419
3420	/*
3421	 * add qgroup to all inherited groups
3422	 */
3423	if (inherit) {
3424		i_qgroups = (u64 *)(inherit + 1);
3425		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3426			if (*i_qgroups == 0)
3427				continue;
3428			ret = add_qgroup_relation_item(trans, objectid,
3429						       *i_qgroups);
3430			if (ret && ret != -EEXIST)
3431				goto out;
3432			ret = add_qgroup_relation_item(trans, *i_qgroups,
3433						       objectid);
3434			if (ret && ret != -EEXIST)
3435				goto out;
3436		}
3437		ret = 0;
3438
3439		qlist_prealloc = kcalloc(inherit->num_qgroups,
3440					 sizeof(struct btrfs_qgroup_list *),
3441					 GFP_NOFS);
3442		if (!qlist_prealloc) {
3443			ret = -ENOMEM;
3444			goto out;
3445		}
3446		for (int i = 0; i < inherit->num_qgroups; i++) {
3447			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3448						    GFP_NOFS);
3449			if (!qlist_prealloc[i]) {
3450				ret = -ENOMEM;
3451				goto out;
3452			}
3453		}
3454	}
3455
3456	spin_lock(&fs_info->qgroup_lock);
3457
3458	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3459	prealloc = NULL;
3460
3461	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3462		dstgroup->lim_flags = inherit->lim.flags;
3463		dstgroup->max_rfer = inherit->lim.max_rfer;
3464		dstgroup->max_excl = inherit->lim.max_excl;
3465		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3466		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3467
3468		qgroup_dirty(fs_info, dstgroup);
3469	}
3470
3471	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3472		srcgroup = find_qgroup_rb(fs_info, srcid);
3473		if (!srcgroup)
3474			goto unlock;
3475
3476		/*
3477		 * We call inherit after we clone the root in order to make sure
3478		 * our counts don't go crazy, so at this point the only
3479		 * difference between the two roots should be the root node.
3480		 */
3481		level_size = fs_info->nodesize;
3482		dstgroup->rfer = srcgroup->rfer;
3483		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3484		dstgroup->excl = level_size;
3485		dstgroup->excl_cmpr = level_size;
3486		srcgroup->excl = level_size;
3487		srcgroup->excl_cmpr = level_size;
3488
3489		/* inherit the limit info */
3490		dstgroup->lim_flags = srcgroup->lim_flags;
3491		dstgroup->max_rfer = srcgroup->max_rfer;
3492		dstgroup->max_excl = srcgroup->max_excl;
3493		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3494		dstgroup->rsv_excl = srcgroup->rsv_excl;
3495
3496		qgroup_dirty(fs_info, dstgroup);
3497		qgroup_dirty(fs_info, srcgroup);
3498
3499		/*
3500		 * If the source qgroup has parent but the new one doesn't,
3501		 * we need a full rescan.
3502		 */
3503		if (!inherit && !list_empty(&srcgroup->groups))
3504			need_rescan = true;
3505	}
3506
3507	if (!inherit)
3508		goto unlock;
3509
3510	i_qgroups = (u64 *)(inherit + 1);
3511	for (int i = 0; i < inherit->num_qgroups; i++) {
3512		if (*i_qgroups) {
3513			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3514					      *i_qgroups);
3515			qlist_prealloc[i] = NULL;
3516			if (ret)
3517				goto unlock;
3518		}
3519		if (srcid) {
3520			/* Check if we can do a quick inherit. */
3521			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3522			if (ret < 0)
3523				goto unlock;
3524			if (ret > 0)
3525				need_rescan = true;
3526			ret = 0;
3527		}
3528		++i_qgroups;
3529	}
3530
3531	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3532		struct btrfs_qgroup *src;
3533		struct btrfs_qgroup *dst;
3534
3535		if (!i_qgroups[0] || !i_qgroups[1])
3536			continue;
3537
3538		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3539		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3540
3541		if (!src || !dst) {
3542			ret = -EINVAL;
3543			goto unlock;
3544		}
3545
3546		dst->rfer = src->rfer - level_size;
3547		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3548
3549		/* Manually tweaking numbers certainly needs a rescan */
3550		need_rescan = true;
3551	}
3552	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3553		struct btrfs_qgroup *src;
3554		struct btrfs_qgroup *dst;
3555
3556		if (!i_qgroups[0] || !i_qgroups[1])
3557			continue;
3558
3559		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3560		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3561
3562		if (!src || !dst) {
3563			ret = -EINVAL;
3564			goto unlock;
3565		}
3566
3567		dst->excl = src->excl + level_size;
3568		dst->excl_cmpr = src->excl_cmpr + level_size;
3569		need_rescan = true;
3570	}
3571
3572unlock:
3573	spin_unlock(&fs_info->qgroup_lock);
3574	if (!ret)
3575		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3576out:
3577	if (!committing)
3578		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3579	if (need_rescan)
3580		qgroup_mark_inconsistent(fs_info);
3581	if (qlist_prealloc) {
3582		for (int i = 0; i < inherit->num_qgroups; i++)
3583			kfree(qlist_prealloc[i]);
3584		kfree(qlist_prealloc);
3585	}
3586	if (free_inherit)
3587		kfree(inherit);
3588	kfree(prealloc);
3589	return ret;
3590}
3591
3592static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3593{
3594	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3595	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3596		return false;
3597
3598	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3599	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3600		return false;
3601
3602	return true;
3603}
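
/*
 * Illustrative arithmetic (assumed numbers): with max_rfer = 1M,
 * rfer = 768K and 128K already reserved, a new 256K reservation is
 * rejected, since 128K + 768K + 256K > 1M, even though rfer alone would
 * still be under the limit.
 */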
3604
3605static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3606			  enum btrfs_qgroup_rsv_type type)
3607{
3608	struct btrfs_qgroup *qgroup;
3609	struct btrfs_fs_info *fs_info = root->fs_info;
3610	u64 ref_root = btrfs_root_id(root);
3611	int ret = 0;
3612	LIST_HEAD(qgroup_list);
3613
3614	if (!is_fstree(ref_root))
3615		return 0;
3616
3617	if (num_bytes == 0)
3618		return 0;
3619
3620	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3621	    capable(CAP_SYS_RESOURCE))
3622		enforce = false;
3623
3624	spin_lock(&fs_info->qgroup_lock);
3625	if (!fs_info->quota_root)
3626		goto out;
3627
3628	qgroup = find_qgroup_rb(fs_info, ref_root);
3629	if (!qgroup)
3630		goto out;
3631
3632	qgroup_iterator_add(&qgroup_list, qgroup);
3633	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3634		struct btrfs_qgroup_list *glist;
3635
3636		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3637			ret = -EDQUOT;
3638			goto out;
3639		}
3640
3641		list_for_each_entry(glist, &qgroup->groups, next_group)
3642			qgroup_iterator_add(&qgroup_list, glist->group);
3643	}
3644
3645	ret = 0;
3646	/*
3647	 * no limits exceeded, now record the reservation into all qgroups
3648	 */
3649	list_for_each_entry(qgroup, &qgroup_list, iterator)
3650		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3651
3652out:
3653	qgroup_iterator_clean(&qgroup_list);
3654	spin_unlock(&fs_info->qgroup_lock);
3655	return ret;
3656}
3657
3658/*
3659 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3660 * qgroup).
3661 *
3662 * Will handle all higher level qgroups too.
3663 *
3664 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3665 * This special case is only used for META_PERTRANS type.
3666 */
3667void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3668			       u64 ref_root, u64 num_bytes,
3669			       enum btrfs_qgroup_rsv_type type)
3670{
3671	struct btrfs_qgroup *qgroup;
3672	LIST_HEAD(qgroup_list);
3673
3674	if (!is_fstree(ref_root))
3675		return;
3676
3677	if (num_bytes == 0)
3678		return;
3679
3680	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3681		WARN(1, "%s: Invalid type to free", __func__);
3682		return;
3683	}
3684	spin_lock(&fs_info->qgroup_lock);
3685
3686	if (!fs_info->quota_root)
3687		goto out;
3688
3689	qgroup = find_qgroup_rb(fs_info, ref_root);
3690	if (!qgroup)
3691		goto out;
3692
3693	if (num_bytes == (u64)-1)
3694		/*
3695		 * We're freeing all pertrans rsv, get the reserved value from
3696		 * the level 0 qgroup as the real num_bytes to free.
3697		 */
3698		num_bytes = qgroup->rsv.values[type];
3699
3700	qgroup_iterator_add(&qgroup_list, qgroup);
3701	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3702		struct btrfs_qgroup_list *glist;
3703
3704		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3705		list_for_each_entry(glist, &qgroup->groups, next_group) {
3706			qgroup_iterator_add(&qgroup_list, glist->group);
3707		}
3708	}
3709out:
3710	qgroup_iterator_clean(&qgroup_list);
3711	spin_unlock(&fs_info->qgroup_lock);
3712}
3713
3714/*
3715 * Check if the leaf is the last leaf, which means all node pointers
3716 * are at their last position.
3717 */
3718static bool is_last_leaf(struct btrfs_path *path)
3719{
3720	int i;
3721
3722	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3723		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3724			return false;
3725	}
3726	return true;
3727}
3728
3729/*
3730 * returns < 0 on error, 0 when more leaves are to be scanned.
3731 * returns 1 when done.
3732 */
3733static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3734			      struct btrfs_path *path)
3735{
3736	struct btrfs_fs_info *fs_info = trans->fs_info;
3737	struct btrfs_root *extent_root;
3738	struct btrfs_key found;
3739	struct extent_buffer *scratch_leaf = NULL;
3740	u64 num_bytes;
3741	bool done;
3742	int slot;
3743	int ret;
3744
3745	if (!btrfs_qgroup_full_accounting(fs_info))
3746		return 1;
3747
3748	mutex_lock(&fs_info->qgroup_rescan_lock);
3749	extent_root = btrfs_extent_root(fs_info,
3750				fs_info->qgroup_rescan_progress.objectid);
3751	ret = btrfs_search_slot_for_read(extent_root,
3752					 &fs_info->qgroup_rescan_progress,
3753					 path, 1, 0);
3754
3755	btrfs_debug(fs_info,
3756		"current progress key (%llu %u %llu), search_slot ret %d",
3757		fs_info->qgroup_rescan_progress.objectid,
3758		fs_info->qgroup_rescan_progress.type,
3759		fs_info->qgroup_rescan_progress.offset, ret);
3760
3761	if (ret) {
3762		/*
3763		 * The rescan is about to end, we will not be scanning any
3764		 * further blocks. We cannot unset the RESCAN flag here, because
3765		 * we want to commit the transaction if everything went well.
3766		 * To make the live accounting work in this phase, we set our
3767		 * scan progress pointer such that every real extent objectid
3768		 * will be smaller.
3769		 */
3770		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3771		btrfs_release_path(path);
3772		mutex_unlock(&fs_info->qgroup_rescan_lock);
3773		return ret;
3774	}
3775	done = is_last_leaf(path);
3776
3777	btrfs_item_key_to_cpu(path->nodes[0], &found,
3778			      btrfs_header_nritems(path->nodes[0]) - 1);
3779	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3780
3781	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3782	if (!scratch_leaf) {
3783		ret = -ENOMEM;
3784		mutex_unlock(&fs_info->qgroup_rescan_lock);
3785		goto out;
3786	}
3787	slot = path->slots[0];
3788	btrfs_release_path(path);
3789	mutex_unlock(&fs_info->qgroup_rescan_lock);
3790
3791	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3792		struct btrfs_backref_walk_ctx ctx = { 0 };
3793
3794		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3795		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3796		    found.type != BTRFS_METADATA_ITEM_KEY)
3797			continue;
3798		if (found.type == BTRFS_METADATA_ITEM_KEY)
3799			num_bytes = fs_info->nodesize;
3800		else
3801			num_bytes = found.offset;
3802
3803		ctx.bytenr = found.objectid;
3804		ctx.fs_info = fs_info;
3805
3806		ret = btrfs_find_all_roots(&ctx, false);
3807		if (ret < 0)
3808			goto out;
3809		/* For rescan, just pass old_roots as NULL */
3810		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3811						  num_bytes, NULL, ctx.roots);
3812		if (ret < 0)
3813			goto out;
3814	}
3815out:
3816	if (scratch_leaf)
3817		free_extent_buffer(scratch_leaf);
3818
3819	if (done && !ret) {
3820		ret = 1;
3821		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3822	}
3823	return ret;
3824}
3825
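/*
 * Editor's note (illustrative): for a BTRFS_METADATA_ITEM_KEY the extent
 * size is implicit, so with a 16K nodesize it is accounted as 16384 bytes,
 * while a BTRFS_EXTENT_ITEM_KEY uses key.offset as its byte length.
 * Passing old_roots as NULL makes btrfs_qgroup_account_extent() treat the
 * extent as having had no prior roots, which is what a rescan starting
 * from zeroed counters wants.
 */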
3826static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3827{
3828	if (btrfs_fs_closing(fs_info))
3829		return true;
3830	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3831		return true;
3832	if (!btrfs_qgroup_enabled(fs_info))
3833		return true;
3834	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3835		return true;
3836	return false;
3837}
3838
3839static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3840{
3841	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3842						     qgroup_rescan_work);
3843	struct btrfs_path *path;
3844	struct btrfs_trans_handle *trans = NULL;
3845	int ret = 0;
3846	bool stopped = false;
3847	bool did_leaf_rescans = false;
3848
3849	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3850		return;
3851
3852	path = btrfs_alloc_path();
3853	if (!path) {
3854		ret = -ENOMEM;
3855		goto out;
3856	}
3857	/*
3858	 * Rescan should only search the commit root, and any later differences
3859	 * should be recorded by qgroup accounting
3860	 */
3861	path->search_commit_root = 1;
3862	path->skip_locking = 1;
3863
3864	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3865		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3866		if (IS_ERR(trans)) {
3867			ret = PTR_ERR(trans);
3868			break;
3869		}
3870
3871		ret = qgroup_rescan_leaf(trans, path);
3872		did_leaf_rescans = true;
3873
3874		if (ret > 0)
3875			btrfs_commit_transaction(trans);
3876		else
3877			btrfs_end_transaction(trans);
3878	}
3879
3880out:
3881	btrfs_free_path(path);
3882
3883	mutex_lock(&fs_info->qgroup_rescan_lock);
3884	if (ret > 0 &&
3885	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3886		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3887	} else if (ret < 0 || stopped) {
3888		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3889	}
3890	mutex_unlock(&fs_info->qgroup_rescan_lock);
3891
3892	/*
3893	 * Only update status, since the previous part has already updated the
3894	 * qgroup info, and only if we did any actual work. This also prevents
3895	 * a race with a concurrent quota disable, which has already set
3896	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3897	 * btrfs_quota_disable().
3898	 */
3899	if (did_leaf_rescans) {
3900		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3901		if (IS_ERR(trans)) {
3902			ret = PTR_ERR(trans);
3903			trans = NULL;
3904			btrfs_err(fs_info,
3905				  "fail to start transaction for status update: %d",
3906				  ret);
3907		}
3908	} else {
3909		trans = NULL;
3910	}
3911
3912	mutex_lock(&fs_info->qgroup_rescan_lock);
3913	if (!stopped ||
3914	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3915		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3916	if (trans) {
3917		int ret2 = update_qgroup_status_item(trans);
3918
3919		if (ret2 < 0) {
3920			ret = ret2;
3921			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3922		}
3923	}
3924	fs_info->qgroup_rescan_running = false;
3925	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3926	complete_all(&fs_info->qgroup_rescan_completion);
3927	mutex_unlock(&fs_info->qgroup_rescan_lock);
3928
3929	if (!trans)
3930		return;
3931
3932	btrfs_end_transaction(trans);
3933
3934	if (stopped) {
3935		btrfs_info(fs_info, "qgroup scan paused");
3936	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3937		btrfs_info(fs_info, "qgroup scan cancelled");
3938	} else if (ret >= 0) {
3939		btrfs_info(fs_info, "qgroup scan completed%s",
3940			ret > 0 ? " (inconsistency flag cleared)" : "");
3941	} else {
3942		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3943	}
3944}
3945
3946/*
3947 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3948 * memory required for the rescan context.
3949 */
3950static int
3951qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3952		   int init_flags)
3953{
3954	int ret = 0;
3955
3956	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3957		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3958		return -EINVAL;
3959	}
3960
3961	if (!init_flags) {
3962		/* we're resuming qgroup rescan at mount time */
3963		if (!(fs_info->qgroup_flags &
3964		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3965			btrfs_debug(fs_info,
3966			"qgroup rescan init failed, qgroup rescan is not queued");
3967			ret = -EINVAL;
3968		} else if (!(fs_info->qgroup_flags &
3969			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3970			btrfs_debug(fs_info,
3971			"qgroup rescan init failed, qgroup is not enabled");
3972			ret = -ENOTCONN;
3973		}
3974
3975		if (ret)
3976			return ret;
3977	}
3978
3979	mutex_lock(&fs_info->qgroup_rescan_lock);
3980
3981	if (init_flags) {
3982		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3983			ret = -EINPROGRESS;
3984		} else if (!(fs_info->qgroup_flags &
3985			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3986			btrfs_debug(fs_info,
3987			"qgroup rescan init failed, qgroup is not enabled");
3988			ret = -ENOTCONN;
3989		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3990			/* Quota disable is in progress */
3991			ret = -EBUSY;
3992		}
3993
3994		if (ret) {
3995			mutex_unlock(&fs_info->qgroup_rescan_lock);
3996			return ret;
3997		}
3998		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3999	}
4000
4001	memset(&fs_info->qgroup_rescan_progress, 0,
4002		sizeof(fs_info->qgroup_rescan_progress));
4003	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
4004				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
4005	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
4006	init_completion(&fs_info->qgroup_rescan_completion);
4007	mutex_unlock(&fs_info->qgroup_rescan_lock);
4008
4009	btrfs_init_work(&fs_info->qgroup_rescan_work,
4010			btrfs_qgroup_rescan_worker, NULL);
4011	return 0;
4012}
4013
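/*
 * Editor's note (illustrative): a non-zero init_flags starts a fresh rescan
 * and sets BTRFS_QGROUP_STATUS_FLAG_RESCAN (failing with -EINPROGRESS if one
 * is already queued), while init_flags == 0 resumes an interrupted rescan at
 * mount time and only succeeds if that status flag was persisted on disk.
 */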
4014static void
4015qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
4016{
4017	struct rb_node *n;
4018	struct btrfs_qgroup *qgroup;
4019
4020	spin_lock(&fs_info->qgroup_lock);
4021	/* clear all current qgroup tracking information */
4022	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
4023		qgroup = rb_entry(n, struct btrfs_qgroup, node);
4024		qgroup->rfer = 0;
4025		qgroup->rfer_cmpr = 0;
4026		qgroup->excl = 0;
4027		qgroup->excl_cmpr = 0;
4028		qgroup_dirty(fs_info, qgroup);
4029	}
4030	spin_unlock(&fs_info->qgroup_lock);
4031}
4032
4033int
4034btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
4035{
4036	int ret = 0;
4037
4038	ret = qgroup_rescan_init(fs_info, 0, 1);
4039	if (ret)
4040		return ret;
4041
4042	/*
4043	 * We have set the rescan_progress to 0, which means no more
4044	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4045	 * However, btrfs_qgroup_account_ref may be running right after its
4046	 * call to btrfs_find_all_roots, in which case it would still do the
4047	 * accounting.
4048	 * To solve this, we're committing the transaction, which will
4049	 * ensure we run all delayed refs and only after that, we are
4050	 * going to clear all tracking information for a clean start.
4051	 */
4052
4053	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4054	if (ret) {
4055		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4056		return ret;
4057	}
4058
4059	qgroup_rescan_zero_tracking(fs_info);
4060
4061	mutex_lock(&fs_info->qgroup_rescan_lock);
4062	fs_info->qgroup_rescan_running = true;
4063	btrfs_queue_work(fs_info->qgroup_rescan_workers,
4064			 &fs_info->qgroup_rescan_work);
4065	mutex_unlock(&fs_info->qgroup_rescan_lock);
4066
4067	return 0;
4068}
4069
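/*
 * Editor's note (illustrative): the commit above acts as a barrier; any
 * delayed ref that raced with resetting rescan_progress has been run by the
 * time qgroup_rescan_zero_tracking() clears the counters, so the worker
 * starts from a clean, consistent baseline.
 */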
4070int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4071				     bool interruptible)
4072{
4073	int running;
4074	int ret = 0;
4075
4076	mutex_lock(&fs_info->qgroup_rescan_lock);
4077	running = fs_info->qgroup_rescan_running;
4078	mutex_unlock(&fs_info->qgroup_rescan_lock);
4079
4080	if (!running)
4081		return 0;
4082
4083	if (interruptible)
4084		ret = wait_for_completion_interruptible(
4085					&fs_info->qgroup_rescan_completion);
4086	else
4087		wait_for_completion(&fs_info->qgroup_rescan_completion);
4088
4089	return ret;
4090}
4091
4092/*
4093 * This is only called from open_ctree(), where we're still single-threaded,
4094 * thus locking is omitted here.
4095 */
4096void
4097btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4098{
4099	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4100		mutex_lock(&fs_info->qgroup_rescan_lock);
4101		fs_info->qgroup_rescan_running = true;
4102		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4103				 &fs_info->qgroup_rescan_work);
4104		mutex_unlock(&fs_info->qgroup_rescan_lock);
4105	}
4106}
4107
4108#define rbtree_iterate_from_safe(node, next, start)				\
4109       for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
4110
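/*
 * Editor's note (illustrative): the statement expression above caches
 * rb_next(node) in @next before the loop body runs, so the body may remove
 * @node from the tree (as qgroup_unreserve_range() below does via
 * ulist_del()) without breaking the iteration.
 */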
4111static int qgroup_unreserve_range(struct btrfs_inode *inode,
4112				  struct extent_changeset *reserved, u64 start,
4113				  u64 len)
4114{
4115	struct rb_node *node;
4116	struct rb_node *next;
4117	struct ulist_node *entry;
4118	int ret = 0;
4119
4120	node = reserved->range_changed.root.rb_node;
4121	if (!node)
4122		return 0;
4123	while (node) {
4124		entry = rb_entry(node, struct ulist_node, rb_node);
4125		if (entry->val < start)
4126			node = node->rb_right;
4127		else
4128			node = node->rb_left;
4129	}
4130
4131	if (entry->val > start && rb_prev(&entry->rb_node))
4132		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4133				 rb_node);
4134
4135	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4136		u64 entry_start;
4137		u64 entry_end;
4138		u64 entry_len;
4139		int clear_ret;
4140
4141		entry = rb_entry(node, struct ulist_node, rb_node);
4142		entry_start = entry->val;
4143		entry_end = entry->aux;
4144		entry_len = entry_end - entry_start + 1;
4145
4146		if (entry_start >= start + len)
4147			break;
4148		if (entry_start + entry_len <= start)
4149			continue;
4150		/*
4151		 * Now the entry overlaps [start, start + len), revert the
4152		 * EXTENT_QGROUP_RESERVED bit.
4153		 */
4154		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
4155					      entry_end, EXTENT_QGROUP_RESERVED);
4156		if (!ret && clear_ret < 0)
4157			ret = clear_ret;
4158
4159		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4160		if (likely(reserved->bytes_changed >= entry_len)) {
4161			reserved->bytes_changed -= entry_len;
4162		} else {
4163			WARN_ON(1);
4164			reserved->bytes_changed = 0;
4165		}
4166	}
4167
4168	return ret;
4169}
4170
4171/*
4172 * Try to free some space for qgroup.
4173 *
4174 * For qgroup, there are only 3 ways to free qgroup space:
4175 * - Flush nodatacow write
4176 *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4177 *   In theory, we should only flush nodatacow inodes, but it's not yet
4178 *   possible, so we need to flush the whole root.
4179 *
4180 * - Wait for ordered extents
4181 *   When ordered extents finish, their reserved metadata is finally
4182 *   converted to per_trans status, which can be freed by a later
4183 *   transaction commit.
4184 *
4185 * - Commit transaction
4186 *   This would free the meta_per_trans space.
4187 *   In theory this shouldn't provide much space, but at this point any
4188 *   extra qgroup space helps.
4189 */
4190static int try_flush_qgroup(struct btrfs_root *root)
4191{
4192	int ret;
4193
4194	/* Can't hold an open transaction or we run the risk of deadlocking. */
4195	ASSERT(current->journal_info == NULL);
4196	if (WARN_ON(current->journal_info))
4197		return 0;
4198
4199	/*
4200	 * We don't want to run flush again and again, so if there is a running
4201	 * one, wait for it to finish and return instead of starting a new flush.
4202	 */
4203	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4204		wait_event(root->qgroup_flush_wait,
4205			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4206		return 0;
4207	}
4208
4209	ret = btrfs_start_delalloc_snapshot(root, true);
4210	if (ret < 0)
4211		goto out;
4212	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4213
4214	/*
4215	 * After waiting for ordered extents run delayed iputs in order to free
4216	 * space from unlinked files before committing the current transaction,
4217	 * as ordered extents may have been holding the last reference of an
4218	 * inode and they add a delayed iput when they complete.
4219	 */
4220	btrfs_run_delayed_iputs(root->fs_info);
4221	btrfs_wait_on_delayed_iputs(root->fs_info);
4222
4223	ret = btrfs_commit_current_transaction(root);
4224out:
4225	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4226	wake_up(&root->qgroup_flush_wait);
4227	return ret;
4228}
4229
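/*
 * Illustrative sketch (editor's addition) of the flush-then-retry pattern
 * the reserve helpers below build on top of try_flush_qgroup():
 *
 *	ret = qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret == -EDQUOT) {
 *		ret = try_flush_qgroup(inode->root);
 *		if (!ret)
 *			ret = qgroup_reserve_data(inode, &reserved, start, len);
 *	}
 */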
4230static int qgroup_reserve_data(struct btrfs_inode *inode,
4231			struct extent_changeset **reserved_ret, u64 start,
4232			u64 len)
4233{
4234	struct btrfs_root *root = inode->root;
4235	struct extent_changeset *reserved;
4236	bool new_reserved = false;
4237	u64 orig_reserved;
4238	u64 to_reserve;
4239	int ret;
4240
4241	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4242	    !is_fstree(btrfs_root_id(root)) || len == 0)
4243		return 0;
4244
4245	/* @reserved parameter is mandatory for qgroup */
4246	if (WARN_ON(!reserved_ret))
4247		return -EINVAL;
4248	if (!*reserved_ret) {
4249		new_reserved = true;
4250		*reserved_ret = extent_changeset_alloc();
4251		if (!*reserved_ret)
4252			return -ENOMEM;
4253	}
4254	reserved = *reserved_ret;
4255	/* Record already reserved space */
4256	orig_reserved = reserved->bytes_changed;
4257	ret = set_record_extent_bits(&inode->io_tree, start,
4258			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
4259
4260	/* Newly reserved space */
4261	to_reserve = reserved->bytes_changed - orig_reserved;
4262	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4263					to_reserve, QGROUP_RESERVE);
4264	if (ret < 0)
4265		goto out;
4266	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4267	if (ret < 0)
4268		goto cleanup;
4269
4270	return ret;
4271
4272cleanup:
4273	qgroup_unreserve_range(inode, reserved, start, len);
4274out:
4275	if (new_reserved) {
4276		extent_changeset_free(reserved);
4277		*reserved_ret = NULL;
4278	}
4279	return ret;
4280}
4281
4282/*
4283 * Reserve qgroup space for range [start, start + len).
4284 *
4285 * This function will either reserve space from related qgroups or do nothing
4286 * if the range is already reserved.
4287 *
4288 * Return 0 for successful reservation
4289 * Return <0 for error (including -EDQUOT)
4290 *
4291 * NOTE: This function may sleep for memory allocation, dirty page flushing and
4292 *	 commit transaction. So caller should not hold any dirty page locked.
4293 */
4294int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4295			struct extent_changeset **reserved_ret, u64 start,
4296			u64 len)
4297{
4298	int ret;
4299
4300	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4301	if (ret <= 0 && ret != -EDQUOT)
4302		return ret;
4303
4304	ret = try_flush_qgroup(inode->root);
4305	if (ret < 0)
4306		return ret;
4307	return qgroup_reserve_data(inode, reserved_ret, start, len);
4308}
4309
4310/* Free ranges specified by @reserved, normally in error path */
4311static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4312				     struct extent_changeset *reserved,
4313				     u64 start, u64 len, u64 *freed_ret)
4314{
4315	struct btrfs_root *root = inode->root;
4316	struct ulist_node *unode;
4317	struct ulist_iterator uiter;
4318	struct extent_changeset changeset;
4319	u64 freed = 0;
4320	int ret;
4321
4322	extent_changeset_init(&changeset);
4323	len = round_up(start + len, root->fs_info->sectorsize);
4324	start = round_down(start, root->fs_info->sectorsize);
4325
4326	ULIST_ITER_INIT(&uiter);
4327	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4328		u64 range_start = unode->val;
4329		/* unode->aux is the inclusive end */
4330		u64 range_len = unode->aux - range_start + 1;
4331		u64 free_start;
4332		u64 free_len;
4333
4334		extent_changeset_release(&changeset);
4335
4336		/* Only free the part of the range within [start, start + len) */
4337		if (range_start >= start + len ||
4338		    range_start + range_len <= start)
4339			continue;
4340		free_start = max(range_start, start);
4341		free_len = min(start + len, range_start + range_len) -
4342			   free_start;
4343		/*
4344		 * TODO: Also modify reserved->ranges_reserved to reflect
4345		 * the modification.
4346		 *
4347		 * However, as long as we free qgroup reserved space according to
4348		 * EXTENT_QGROUP_RESERVED, we won't double free.
4349		 * So no need to rush.
4350		 */
4351		ret = clear_record_extent_bits(&inode->io_tree, free_start,
4352				free_start + free_len - 1,
4353				EXTENT_QGROUP_RESERVED, &changeset);
4354		if (ret < 0)
4355			goto out;
4356		freed += changeset.bytes_changed;
4357	}
4358	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4359				  BTRFS_QGROUP_RSV_DATA);
4360	if (freed_ret)
4361		*freed_ret = freed;
4362	ret = 0;
4363out:
4364	extent_changeset_release(&changeset);
4365	return ret;
4366}
4367
4368static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4369			struct extent_changeset *reserved, u64 start, u64 len,
4370			u64 *released, int free)
4371{
4372	struct extent_changeset changeset;
4373	int trace_op = QGROUP_RELEASE;
4374	int ret;
4375
4376	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4377		return clear_record_extent_bits(&inode->io_tree, start,
4378						start + len - 1,
4379						EXTENT_QGROUP_RESERVED, NULL);
4380	}
4381
4382	/* In release case, we shouldn't have @reserved */
4383	WARN_ON(!free && reserved);
4384	if (free && reserved)
4385		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4386	extent_changeset_init(&changeset);
4387	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4388				       EXTENT_QGROUP_RESERVED, &changeset);
4389	if (ret < 0)
4390		goto out;
4391
4392	if (free)
4393		trace_op = QGROUP_FREE;
4394	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4395					changeset.bytes_changed, trace_op);
4396	if (free)
4397		btrfs_qgroup_free_refroot(inode->root->fs_info,
4398				btrfs_root_id(inode->root),
4399				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4400	if (released)
4401		*released = changeset.bytes_changed;
4402out:
4403	extent_changeset_release(&changeset);
4404	return ret;
4405}
4406
4407/*
4408 * Free a reserved space range from io_tree and related qgroups
4409 *
4410 * Should be called when a range of pages gets invalidated before reaching
4411 * disk, or for the error cleanup case.
4412 * If @reserved is given, only the reserved range in [@start, @start + @len)
4413 * will be freed.
4414 *
4415 * For data written to disk, use btrfs_qgroup_release_data().
4416 *
4417 * NOTE: This function may sleep for memory allocation.
4418 */
4419int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4420			   struct extent_changeset *reserved,
4421			   u64 start, u64 len, u64 *freed)
4422{
4423	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4424}
4425
4426/*
4427 * Release a reserved space range from io_tree only.
4428 *
4429 * Should be called when a range of pages gets written to disk and the
4430 * corresponding FILE_EXTENT is inserted into the corresponding root.
4431 *
4432 * Since the new qgroup accounting framework will only update qgroup numbers
4433 * at commit_transaction() time, its reserved space shouldn't be freed from
4434 * related qgroups.
4435 *
4436 * But we should release the range from the io_tree, to allow further writes
4437 * to be COWed.
4438 *
4439 * NOTE: This function may sleep for memory allocation.
4440 */
4441int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4442{
4443	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4444}
4445
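/*
 * Editor's note (illustrative): free and release differ only in the last
 * step. Both clear EXTENT_QGROUP_RESERVED from the io_tree, but only
 * btrfs_qgroup_free_data() also returns the bytes to the qgroup rsv
 * counters; released space is instead settled by the accounting done at
 * transaction commit.
 */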
4446static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4447			      enum btrfs_qgroup_rsv_type type)
4448{
4449	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4450	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4451		return;
4452	if (num_bytes == 0)
4453		return;
4454
4455	spin_lock(&root->qgroup_meta_rsv_lock);
4456	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4457		root->qgroup_meta_rsv_prealloc += num_bytes;
4458	else
4459		root->qgroup_meta_rsv_pertrans += num_bytes;
4460	spin_unlock(&root->qgroup_meta_rsv_lock);
4461}
4462
4463static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4464			     enum btrfs_qgroup_rsv_type type)
4465{
4466	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4467	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4468		return 0;
4469	if (num_bytes == 0)
4470		return 0;
4471
4472	spin_lock(&root->qgroup_meta_rsv_lock);
4473	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4474		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4475				  num_bytes);
4476		root->qgroup_meta_rsv_prealloc -= num_bytes;
4477	} else {
4478		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4479				  num_bytes);
4480		root->qgroup_meta_rsv_pertrans -= num_bytes;
4481	}
4482	spin_unlock(&root->qgroup_meta_rsv_lock);
4483	return num_bytes;
4484}
4485
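/*
 * Editor's note (illustrative): sub_root_meta_rsv() clamps what it
 * subtracts, e.g. with qgroup_meta_rsv_prealloc == 4096 a request for 16384
 * bytes only removes (and reports) 4096, so callers never free more than
 * was actually recorded against the root.
 */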
4486int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4487			      enum btrfs_qgroup_rsv_type type, bool enforce)
4488{
4489	struct btrfs_fs_info *fs_info = root->fs_info;
4490	int ret;
4491
4492	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4493	    !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4494		return 0;
4495
4496	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4497	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4498	ret = qgroup_reserve(root, num_bytes, enforce, type);
4499	if (ret < 0)
4500		return ret;
4501	/*
4502	 * Record what we have reserved into root.
4503	 *
4504	 * This avoids underflow across a quota disabled->enabled transition:
4505	 * we may try to free space we haven't reserved (since quota was
4506	 * disabled), so record what we reserved into the root and ensure a
4507	 * later release won't underflow this number.
4508	 */
4509	add_root_meta_rsv(root, num_bytes, type);
4510	return ret;
4511}
4512
4513int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4514				enum btrfs_qgroup_rsv_type type, bool enforce,
4515				bool noflush)
4516{
4517	int ret;
4518
4519	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4520	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4521		return ret;
4522
4523	ret = try_flush_qgroup(root);
4524	if (ret < 0)
4525		return ret;
4526	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4527}
4528
4529/*
4530 * Per-transaction meta reservations should all be freed at transaction
4531 * commit time
4532 */
4533void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4534{
4535	struct btrfs_fs_info *fs_info = root->fs_info;
4536
4537	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4538	    !is_fstree(btrfs_root_id(root)))
4539		return;
4540
4541	/* TODO: Update trace point to handle such free */
4542	trace_qgroup_meta_free_all_pertrans(root);
4543	/* Special value -1 means to free all reserved space */
4544	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4545				  BTRFS_QGROUP_RSV_META_PERTRANS);
4546}
4547
4548void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4549			      enum btrfs_qgroup_rsv_type type)
4550{
4551	struct btrfs_fs_info *fs_info = root->fs_info;
4552
4553	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4554	    !is_fstree(btrfs_root_id(root)))
4555		return;
4556
4557	/*
4558	 * A reservation for META_PREALLOC can happen before quota is enabled,
4559	 * which can lead to underflow.
4560	 * Ensure here that we only free what we have really reserved.
4561	 */
4562	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4563	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4564	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4565	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4566}
4567
4568static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4569				int num_bytes)
4570{
4571	struct btrfs_qgroup *qgroup;
4572	LIST_HEAD(qgroup_list);
4573
4574	if (num_bytes == 0)
4575		return;
4576	if (!fs_info->quota_root)
4577		return;
4578
4579	spin_lock(&fs_info->qgroup_lock);
4580	qgroup = find_qgroup_rb(fs_info, ref_root);
4581	if (!qgroup)
4582		goto out;
4583
4584	qgroup_iterator_add(&qgroup_list, qgroup);
4585	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4586		struct btrfs_qgroup_list *glist;
4587
4588		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4589				BTRFS_QGROUP_RSV_META_PREALLOC);
4590		if (!sb_rdonly(fs_info->sb))
4591			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4592				       BTRFS_QGROUP_RSV_META_PERTRANS);
4593
4594		list_for_each_entry(glist, &qgroup->groups, next_group)
4595			qgroup_iterator_add(&qgroup_list, glist->group);
4596	}
4597out:
4598	qgroup_iterator_clean(&qgroup_list);
4599	spin_unlock(&fs_info->qgroup_lock);
4600}
4601
4602/*
4603 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4604 *
4605 * This is called when a preallocated meta reservation needs to be used,
4606 * normally after a btrfs_join_transaction() call.
4607 */
4608void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4609{
4610	struct btrfs_fs_info *fs_info = root->fs_info;
4611
4612	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4613	    !is_fstree(btrfs_root_id(root)))
4614		return;
4615	/* Same as btrfs_qgroup_free_meta_prealloc() */
4616	num_bytes = sub_root_meta_rsv(root, num_bytes,
4617				      BTRFS_QGROUP_RSV_META_PREALLOC);
4618	trace_qgroup_meta_convert(root, num_bytes);
4619	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4620	if (!sb_rdonly(fs_info->sb))
4621		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4622}
4623
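/*
 * Editor's note (illustrative): the conversion is a release of
 * META_PREALLOC plus an add of META_PERTRANS in one pass under qgroup_lock
 * (the PERTRANS add is skipped on a read-only fs), so e.g. a 16K PREALLOC
 * reservation becomes 16K of PERTRANS and is later dropped in bulk by
 * btrfs_qgroup_free_meta_all_pertrans() at commit time.
 */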
4624/*
4625 * Check for leaked qgroup reserved space, normally at inode destruction
4626 * time
4627 */
4628void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4629{
4630	struct extent_changeset changeset;
4631	struct ulist_node *unode;
4632	struct ulist_iterator iter;
4633	int ret;
4634
4635	extent_changeset_init(&changeset);
4636	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4637			EXTENT_QGROUP_RESERVED, &changeset);
4638
4639	WARN_ON(ret < 0);
4640	if (WARN_ON(changeset.bytes_changed)) {
4641		ULIST_ITER_INIT(&iter);
4642		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4643			btrfs_warn(inode->root->fs_info,
4644		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4645				btrfs_ino(inode), unode->val, unode->aux);
4646		}
4647		btrfs_qgroup_free_refroot(inode->root->fs_info,
4648				btrfs_root_id(inode->root),
4649				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4650
4651	}
4652	extent_changeset_release(&changeset);
4653}
4654
4655void btrfs_qgroup_init_swapped_blocks(
4656	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4657{
4658	int i;
4659
4660	spin_lock_init(&swapped_blocks->lock);
4661	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4662		swapped_blocks->blocks[i] = RB_ROOT;
4663	swapped_blocks->swapped = false;
4664}
4665
4666/*
4667 * Delete all swapped block records of @root.
4668 * Every record here means we skipped a full subtree scan for qgroup.
4669 *
4670 * Gets called when committing a transaction.
4671 */
4672void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4673{
4674	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4675	int i;
4676
4677	swapped_blocks = &root->swapped_blocks;
4678
4679	spin_lock(&swapped_blocks->lock);
4680	if (!swapped_blocks->swapped)
4681		goto out;
4682	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4683		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4684		struct btrfs_qgroup_swapped_block *entry;
4685		struct btrfs_qgroup_swapped_block *next;
4686
4687		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4688						     node)
4689			kfree(entry);
4690		swapped_blocks->blocks[i] = RB_ROOT;
4691	}
4692	swapped_blocks->swapped = false;
4693out:
4694	spin_unlock(&swapped_blocks->lock);
4695}
4696
4697/*
4698 * Add a subtree root record into @subvol_root.
4699 *
4700 * @subvol_root:	tree root of the subvolume tree that got swapped
4701 * @bg:			block group under balance
4702 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4703 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4704 *			BOTH POINTERS ARE BEFORE TREE SWAP
4705 * @last_snapshot:	last snapshot generation of the subvolume tree
4706 */
4707int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4708		struct btrfs_block_group *bg,
4709		struct extent_buffer *subvol_parent, int subvol_slot,
4710		struct extent_buffer *reloc_parent, int reloc_slot,
4711		u64 last_snapshot)
4712{
4713	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4714	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4715	struct btrfs_qgroup_swapped_block *block;
4716	struct rb_node **cur;
4717	struct rb_node *parent = NULL;
4718	int level = btrfs_header_level(subvol_parent) - 1;
4719	int ret = 0;
4720
4721	if (!btrfs_qgroup_full_accounting(fs_info))
4722		return 0;
4723
4724	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4725	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4726		btrfs_err_rl(fs_info,
4727		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4728			__func__,
4729			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4730			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4731		return -EUCLEAN;
4732	}
4733
4734	block = kmalloc(sizeof(*block), GFP_NOFS);
4735	if (!block) {
4736		ret = -ENOMEM;
4737		goto out;
4738	}
4739
4740	/*
4741	 * @reloc_parent/slot is still before swap, while @block is going to
4742	 * record the bytenr after swap, so we do the swap here.
4743	 */
4744	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4745	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4746							     reloc_slot);
4747	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4748	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4749							    subvol_slot);
4750	block->last_snapshot = last_snapshot;
4751	block->level = level;
4752
4753	/*
4754	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4755	 * so no one else can modify tree blocks, and thus the qgroup numbers
4756	 * will not change no matter the value of trace_leaf.
4757	 */
4758	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4759		block->trace_leaf = true;
4760	else
4761		block->trace_leaf = false;
4762	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4763
4764	/* Insert @block into @blocks */
4765	spin_lock(&blocks->lock);
4766	cur = &blocks->blocks[level].rb_node;
4767	while (*cur) {
4768		struct btrfs_qgroup_swapped_block *entry;
4769
4770		parent = *cur;
4771		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4772				 node);
4773
4774		if (entry->subvol_bytenr < block->subvol_bytenr) {
4775			cur = &(*cur)->rb_left;
4776		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4777			cur = &(*cur)->rb_right;
4778		} else {
4779			if (entry->subvol_generation !=
4780					block->subvol_generation ||
4781			    entry->reloc_bytenr != block->reloc_bytenr ||
4782			    entry->reloc_generation !=
4783					block->reloc_generation) {
4784				/*
4785				 * Duplicated but mismatch entry found.
4786				 * Shouldn't happen.
4787				 *
4788				 * Marking qgroup inconsistent should be enough
4789				 * for end users.
4790				 */
4791				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4792				ret = -EEXIST;
4793			}
4794			kfree(block);
4795			goto out_unlock;
4796		}
4797	}
4798	rb_link_node(&block->node, parent, cur);
4799	rb_insert_color(&block->node, &blocks->blocks[level]);
4800	blocks->swapped = true;
4801out_unlock:
4802	spin_unlock(&blocks->lock);
4803out:
4804	if (ret < 0)
4805		qgroup_mark_inconsistent(fs_info);
4806	return ret;
4807}
4808
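/*
 * Editor's note (illustrative): the records inserted above are consumed
 * lazily. The next COW of the subvolume tree block (see
 * btrfs_qgroup_trace_subtree_after_cow() below) looks its bytenr up in
 * blocks->blocks[level], reads the matching reloc subtree root and only
 * then performs the full subtree accounting that balance skipped.
 */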
4809/*
4810 * Check if the tree block is a subtree root, and if so do the delayed
4811 * subtree tracing needed for qgroup.
4812 *
4813 * This is called during btrfs_cow_block().
4814 */
4815int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4816					 struct btrfs_root *root,
4817					 struct extent_buffer *subvol_eb)
4818{
4819	struct btrfs_fs_info *fs_info = root->fs_info;
4820	struct btrfs_tree_parent_check check = { 0 };
4821	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4822	struct btrfs_qgroup_swapped_block *block;
4823	struct extent_buffer *reloc_eb = NULL;
4824	struct rb_node *node;
4825	bool found = false;
4826	bool swapped = false;
4827	int level = btrfs_header_level(subvol_eb);
4828	int ret = 0;
4829	int i;
4830
4831	if (!btrfs_qgroup_full_accounting(fs_info))
4832		return 0;
4833	if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4834		return 0;
4835
4836	spin_lock(&blocks->lock);
4837	if (!blocks->swapped) {
4838		spin_unlock(&blocks->lock);
4839		return 0;
4840	}
4841	node = blocks->blocks[level].rb_node;
4842
4843	while (node) {
4844		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4845		if (block->subvol_bytenr < subvol_eb->start) {
4846			node = node->rb_left;
4847		} else if (block->subvol_bytenr > subvol_eb->start) {
4848			node = node->rb_right;
4849		} else {
4850			found = true;
4851			break;
4852		}
4853	}
4854	if (!found) {
4855		spin_unlock(&blocks->lock);
4856		goto out;
4857	}
4858	/* Found one, remove it from @blocks first and update blocks->swapped */
4859	rb_erase(&block->node, &blocks->blocks[level]);
4860	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4861		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4862			swapped = true;
4863			break;
4864		}
4865	}
4866	blocks->swapped = swapped;
4867	spin_unlock(&blocks->lock);
4868
4869	check.level = block->level;
4870	check.transid = block->reloc_generation;
4871	check.has_first_key = true;
4872	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4873
4874	/* Read out reloc subtree root */
4875	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4876	if (IS_ERR(reloc_eb)) {
4877		ret = PTR_ERR(reloc_eb);
4878		reloc_eb = NULL;
4879		goto free_out;
4880	}
4881	if (!extent_buffer_uptodate(reloc_eb)) {
4882		ret = -EIO;
4883		goto free_out;
4884	}
4885
4886	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4887			block->last_snapshot, block->trace_leaf);
4888free_out:
4889	kfree(block);
4890	free_extent_buffer(reloc_eb);
4891out:
4892	if (ret < 0) {
4893		btrfs_err_rl(fs_info,
4894			     "failed to account subtree at bytenr %llu: %d",
4895			     subvol_eb->start, ret);
4896		qgroup_mark_inconsistent(fs_info);
4897	}
4898	return ret;
4899}
4900
4901void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4902{
4903	struct btrfs_qgroup_extent_record *entry;
4904	unsigned long index;
4905
4906	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4907		ulist_free(entry->old_roots);
4908		kfree(entry);
4909	}
4910	xa_destroy(&trans->delayed_refs.dirty_extents);
4911}
4912
4913int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4914			      const struct btrfs_squota_delta *delta)
4915{
4916	int ret;
4917	struct btrfs_qgroup *qgroup;
4918	struct btrfs_qgroup *qg;
4919	LIST_HEAD(qgroup_list);
4920	u64 root = delta->root;
4921	u64 num_bytes = delta->num_bytes;
4922	const int sign = (delta->is_inc ? 1 : -1);
4923
4924	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4925		return 0;
4926
4927	if (!is_fstree(root))
4928		return 0;
4929
4930	/* If the extent predates enabling quotas, don't count it. */
4931	if (delta->generation < fs_info->qgroup_enable_gen)
4932		return 0;
4933
4934	spin_lock(&fs_info->qgroup_lock);
4935	qgroup = find_qgroup_rb(fs_info, root);
4936	if (!qgroup) {
4937		ret = -ENOENT;
4938		goto out;
4939	}
4940
4941	ret = 0;
4942	qgroup_iterator_add(&qgroup_list, qgroup);
4943	list_for_each_entry(qg, &qgroup_list, iterator) {
4944		struct btrfs_qgroup_list *glist;
4945
4946		qg->excl += num_bytes * sign;
4947		qg->rfer += num_bytes * sign;
4948		qgroup_dirty(fs_info, qg);
4949
4950		list_for_each_entry(glist, &qg->groups, next_group)
4951			qgroup_iterator_add(&qgroup_list, glist->group);
4952	}
4953	qgroup_iterator_clean(&qgroup_list);
4954
4955out:
4956	spin_unlock(&fs_info->qgroup_lock);
4957	return ret;
4958}
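/*
 * Editor's note (illustrative): squota applies the delta with a plain sign,
 * e.g. an is_inc == true delta of 16384 bytes bumps both excl and rfer by
 * 16384 for the owning qgroup and every ancestor, with none of the
 * old_roots/new_roots comparison that full qgroup accounting performs.
 */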
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/pagemap.h>
   8#include <linux/writeback.h>
   9#include <linux/blkdev.h>
  10#include <linux/rbtree.h>
  11#include <linux/slab.h>
  12#include <linux/workqueue.h>
  13#include <linux/btrfs.h>
  14#include <linux/sched/mm.h>
  15
  16#include "ctree.h"
  17#include "transaction.h"
  18#include "disk-io.h"
  19#include "locking.h"
  20#include "ulist.h"
  21#include "backref.h"
  22#include "extent_io.h"
  23#include "qgroup.h"
  24#include "block-group.h"
  25#include "sysfs.h"
  26#include "tree-mod-log.h"
  27#include "fs.h"
  28#include "accessors.h"
  29#include "extent-tree.h"
  30#include "root-tree.h"
  31#include "tree-checker.h"
  32
  33enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
  34{
  35	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
  36		return BTRFS_QGROUP_MODE_DISABLED;
  37	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
  38		return BTRFS_QGROUP_MODE_SIMPLE;
  39	return BTRFS_QGROUP_MODE_FULL;
  40}
  41
  42bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
  43{
  44	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
  45}
  46
  47bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
  48{
  49	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
  50}
  51
  52/*
  53 * Helpers to access qgroup reservation
  54 *
  55 * Callers should ensure the lock context and type are valid
  56 */
  57
  58static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
  59{
  60	u64 ret = 0;
  61	int i;
  62
  63	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
  64		ret += qgroup->rsv.values[i];
  65
  66	return ret;
  67}
  68
  69#ifdef CONFIG_BTRFS_DEBUG
  70static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
  71{
  72	if (type == BTRFS_QGROUP_RSV_DATA)
  73		return "data";
  74	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
  75		return "meta_pertrans";
  76	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
  77		return "meta_prealloc";
  78	return NULL;
  79}
  80#endif
  81
  82static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
  83			   struct btrfs_qgroup *qgroup, u64 num_bytes,
  84			   enum btrfs_qgroup_rsv_type type)
  85{
  86	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
  87	qgroup->rsv.values[type] += num_bytes;
  88}
  89
  90static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
  91			       struct btrfs_qgroup *qgroup, u64 num_bytes,
  92			       enum btrfs_qgroup_rsv_type type)
  93{
  94	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
  95	if (qgroup->rsv.values[type] >= num_bytes) {
  96		qgroup->rsv.values[type] -= num_bytes;
  97		return;
  98	}
  99#ifdef CONFIG_BTRFS_DEBUG
 100	WARN_RATELIMIT(1,
 101		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
 102		qgroup->qgroupid, qgroup_rsv_type_str(type),
 103		qgroup->rsv.values[type], num_bytes);
 104#endif
 105	qgroup->rsv.values[type] = 0;
 106}
 107
 108static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
 109				     struct btrfs_qgroup *dest,
 110				     struct btrfs_qgroup *src)
 111{
 112	int i;
 113
 114	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
 115		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
 116}
 117
 118static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
 119					 struct btrfs_qgroup *dest,
 120					  struct btrfs_qgroup *src)
 121{
 122	int i;
 123
 124	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
 125		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
 126}
 127
 128static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
 129					   int mod)
 130{
 131	if (qg->old_refcnt < seq)
 132		qg->old_refcnt = seq;
 133	qg->old_refcnt += mod;
 134}
 135
 136static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
 137					   int mod)
 138{
 139	if (qg->new_refcnt < seq)
 140		qg->new_refcnt = seq;
 141	qg->new_refcnt += mod;
 142}
 143
 144static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
 145{
 146	if (qg->old_refcnt < seq)
 147		return 0;
 148	return qg->old_refcnt - seq;
 149}
 150
 151static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
 152{
 153	if (qg->new_refcnt < seq)
 154		return 0;
 155	return qg->new_refcnt - seq;
 156}
 157
 158/*
 159 * glue structure to represent the relations between qgroups.
 160 */
 161struct btrfs_qgroup_list {
 162	struct list_head next_group;
 163	struct list_head next_member;
 164	struct btrfs_qgroup *group;
 165	struct btrfs_qgroup *member;
 166};
 167
 168static int
 169qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 170		   int init_flags);
 171static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
 172
 173/* must be called with qgroup_ioctl_lock held */
 174static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
 175					   u64 qgroupid)
 176{
 177	struct rb_node *n = fs_info->qgroup_tree.rb_node;
 178	struct btrfs_qgroup *qgroup;
 179
 180	while (n) {
 181		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 182		if (qgroup->qgroupid < qgroupid)
 183			n = n->rb_left;
 184		else if (qgroup->qgroupid > qgroupid)
 185			n = n->rb_right;
 186		else
 187			return qgroup;
 188	}
 189	return NULL;
 190}
 191
 192/*
 193 * Add qgroup to the filesystem's qgroup tree.
 194 *
 195 * Must be called with qgroup_lock held and @prealloc preallocated.
 196 *
 197 * The control on the lifespan of @prealloc would be transferred to this
 198 * function, thus caller should no longer touch @prealloc.
 199 */
 200static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
 201					  struct btrfs_qgroup *prealloc,
 202					  u64 qgroupid)
 203{
 204	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
 205	struct rb_node *parent = NULL;
 206	struct btrfs_qgroup *qgroup;
 207
 208	/* Caller must have pre-allocated @prealloc. */
 209	ASSERT(prealloc);
 210
 211	while (*p) {
 212		parent = *p;
 213		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
 214
 215		if (qgroup->qgroupid < qgroupid) {
 216			p = &(*p)->rb_left;
 217		} else if (qgroup->qgroupid > qgroupid) {
 218			p = &(*p)->rb_right;
 219		} else {
 220			kfree(prealloc);
 221			return qgroup;
 222		}
 223	}
 224
 225	qgroup = prealloc;
 226	qgroup->qgroupid = qgroupid;
 227	INIT_LIST_HEAD(&qgroup->groups);
 228	INIT_LIST_HEAD(&qgroup->members);
 229	INIT_LIST_HEAD(&qgroup->dirty);
 230	INIT_LIST_HEAD(&qgroup->iterator);
 231	INIT_LIST_HEAD(&qgroup->nested_iterator);
 232
 233	rb_link_node(&qgroup->node, parent, p);
 234	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
 235
 236	return qgroup;
 237}
 238
 239static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
 240			    struct btrfs_qgroup *qgroup)
 241{
 242	struct btrfs_qgroup_list *list;
 243
 244	list_del(&qgroup->dirty);
 245	while (!list_empty(&qgroup->groups)) {
 246		list = list_first_entry(&qgroup->groups,
 247					struct btrfs_qgroup_list, next_group);
 248		list_del(&list->next_group);
 249		list_del(&list->next_member);
 250		kfree(list);
 251	}
 252
 253	while (!list_empty(&qgroup->members)) {
 254		list = list_first_entry(&qgroup->members,
 255					struct btrfs_qgroup_list, next_member);
 256		list_del(&list->next_group);
 257		list_del(&list->next_member);
 258		kfree(list);
 259	}
 260}
 261
 262/* must be called with qgroup_lock held */
 263static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
 264{
 265	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
 266
 267	if (!qgroup)
 268		return -ENOENT;
 269
 270	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
 271	__del_qgroup_rb(fs_info, qgroup);
 272	return 0;
 273}
 274
 275/*
 276 * Add relation specified by two qgroups.
 277 *
 278 * Must be called with qgroup_lock held, the ownership of @prealloc is
 279 * transferred to this function and caller should not touch it anymore.
 280 *
 281 * Return: 0        on success
 282 *         -ENOENT  if one of the qgroups is NULL
 283 *         <0       other errors
 284 */
 285static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
 286			     struct btrfs_qgroup *member,
 287			     struct btrfs_qgroup *parent)
 288{
 289	if (!member || !parent) {
 290		kfree(prealloc);
 291		return -ENOENT;
 292	}
 293
 294	prealloc->group = parent;
 295	prealloc->member = member;
 296	list_add_tail(&prealloc->next_group, &member->groups);
 297	list_add_tail(&prealloc->next_member, &parent->members);
 298
 299	return 0;
 300}
 301
 302/*
 303 * Add relation specified by two qgroup ids.
 304 *
 305 * Must be called with qgroup_lock held.
 306 *
 307 * Return: 0        on success
 308 *         -ENOENT  if one of the ids does not exist
 309 *         <0       other errors
 310 */
 311static int add_relation_rb(struct btrfs_fs_info *fs_info,
 312			   struct btrfs_qgroup_list *prealloc,
 313			   u64 memberid, u64 parentid)
 314{
 315	struct btrfs_qgroup *member;
 316	struct btrfs_qgroup *parent;
 317
 318	member = find_qgroup_rb(fs_info, memberid);
 319	parent = find_qgroup_rb(fs_info, parentid);
 320
 321	return __add_relation_rb(prealloc, member, parent);
 322}
 323
 324/* Must be called with qgroup_lock held */
 325static int del_relation_rb(struct btrfs_fs_info *fs_info,
 326			   u64 memberid, u64 parentid)
 327{
 328	struct btrfs_qgroup *member;
 329	struct btrfs_qgroup *parent;
 330	struct btrfs_qgroup_list *list;
 331
 332	member = find_qgroup_rb(fs_info, memberid);
 333	parent = find_qgroup_rb(fs_info, parentid);
 334	if (!member || !parent)
 335		return -ENOENT;
 336
 337	list_for_each_entry(list, &member->groups, next_group) {
 338		if (list->group == parent) {
 339			list_del(&list->next_group);
 340			list_del(&list->next_member);
 341			kfree(list);
 342			return 0;
 343		}
 344	}
 345	return -ENOENT;
 346}
 347
 348#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 349int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 350			       u64 rfer, u64 excl)
 351{
 352	struct btrfs_qgroup *qgroup;
 353
 354	qgroup = find_qgroup_rb(fs_info, qgroupid);
 355	if (!qgroup)
 356		return -EINVAL;
 357	if (qgroup->rfer != rfer || qgroup->excl != excl)
 358		return -EINVAL;
 359	return 0;
 360}
 361#endif
 362
 363static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
 364{
 365	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
 366		return;
 367	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
 368				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
 369				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
 370}
 371
 372static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
 373				   struct extent_buffer *leaf, int slot,
 374				   struct btrfs_qgroup_status_item *ptr)
 375{
 376	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
 377	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
 378	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
 379}
 380
 381/*
 382 * The full config is read in one go, only called from open_ctree()
 383 * It doesn't use any locking, as at this point we're still single-threaded
 384 */
 385int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 386{
 387	struct btrfs_key key;
 388	struct btrfs_key found_key;
 389	struct btrfs_root *quota_root = fs_info->quota_root;
 390	struct btrfs_path *path = NULL;
 391	struct extent_buffer *l;
 392	int slot;
 393	int ret = 0;
 394	u64 flags = 0;
 395	u64 rescan_progress = 0;
 396
 397	if (!fs_info->quota_root)
 398		return 0;
 399
 400	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 401	if (!fs_info->qgroup_ulist) {
 402		ret = -ENOMEM;
 403		goto out;
 404	}
 405
 406	path = btrfs_alloc_path();
 407	if (!path) {
 408		ret = -ENOMEM;
 409		goto out;
 410	}
 411
 412	ret = btrfs_sysfs_add_qgroups(fs_info);
 413	if (ret < 0)
 414		goto out;
 415	/* default this to quota off, in case no status key is found */
 416	fs_info->qgroup_flags = 0;
 417
 418	/*
 419	 * pass 1: read status, all qgroup infos and limits
 420	 */
 421	key.objectid = 0;
 422	key.type = 0;
 423	key.offset = 0;
 424	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
 425	if (ret)
 426		goto out;
 427
 428	while (1) {
 429		struct btrfs_qgroup *qgroup;
 430
 431		slot = path->slots[0];
 432		l = path->nodes[0];
 433		btrfs_item_key_to_cpu(l, &found_key, slot);
 434
 435		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
 436			struct btrfs_qgroup_status_item *ptr;
 437
 438			ptr = btrfs_item_ptr(l, slot,
 439					     struct btrfs_qgroup_status_item);
 440
 441			if (btrfs_qgroup_status_version(l, ptr) !=
 442			    BTRFS_QGROUP_STATUS_VERSION) {
 443				btrfs_err(fs_info,
 444				 "old qgroup version, quota disabled");
 445				goto out;
 446			}
 447			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
 448			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
 449				qgroup_read_enable_gen(fs_info, l, slot, ptr);
 450			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
 451				qgroup_mark_inconsistent(fs_info);
 452				btrfs_err(fs_info,
 453					"qgroup generation mismatch, marked as inconsistent");
 454			}
 455			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
 456			goto next1;
 457		}
 458
 459		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
 460		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
 461			goto next1;
 462
 463		qgroup = find_qgroup_rb(fs_info, found_key.offset);
 464		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 465		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
 466			btrfs_err(fs_info, "inconsistent qgroup config");
 467			qgroup_mark_inconsistent(fs_info);
 468		}
 469		if (!qgroup) {
 470			struct btrfs_qgroup *prealloc;
 
 471
 472			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 473			if (!prealloc) {
 474				ret = -ENOMEM;
 475				goto out;
 476			}
 477			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 478		}
 479		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
 480		if (ret < 0)
 481			goto out;
 482
 483		switch (found_key.type) {
 484		case BTRFS_QGROUP_INFO_KEY: {
 485			struct btrfs_qgroup_info_item *ptr;
 486
 487			ptr = btrfs_item_ptr(l, slot,
 488					     struct btrfs_qgroup_info_item);
 489			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
 490			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
 491			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
 492			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
 493			/* generation currently unused */
 494			break;
 495		}
 496		case BTRFS_QGROUP_LIMIT_KEY: {
 497			struct btrfs_qgroup_limit_item *ptr;
 498
 499			ptr = btrfs_item_ptr(l, slot,
 500					     struct btrfs_qgroup_limit_item);
 501			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
 502			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
 503			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
 504			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
 505			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
 506			break;
 507		}
 508		}
 509next1:
 510		ret = btrfs_next_item(quota_root, path);
 511		if (ret < 0)
 512			goto out;
 513		if (ret)
 514			break;
 515	}
 516	btrfs_release_path(path);
 517
 518	/*
 519	 * pass 2: read all qgroup relations
 520	 */
 521	key.objectid = 0;
 522	key.type = BTRFS_QGROUP_RELATION_KEY;
 523	key.offset = 0;
 524	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
 525	if (ret)
 526		goto out;
 527	while (1) {
 528		struct btrfs_qgroup_list *list = NULL;
 529
 530		slot = path->slots[0];
 531		l = path->nodes[0];
 532		btrfs_item_key_to_cpu(l, &found_key, slot);
 533
 534		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
 535			goto next2;
 536
 537		if (found_key.objectid > found_key.offset) {
 538			/* parent <- member, not needed to build config */
 539			/* FIXME should we omit the key completely? */
 540			goto next2;
 541		}
 542
 543		list = kzalloc(sizeof(*list), GFP_KERNEL);
 544		if (!list) {
 545			ret = -ENOMEM;
 546			goto out;
 547		}
 548		ret = add_relation_rb(fs_info, list, found_key.objectid,
 549				      found_key.offset);
 550		list = NULL;
 551		if (ret == -ENOENT) {
 552			btrfs_warn(fs_info,
 553				"orphan qgroup relation 0x%llx->0x%llx",
 554				found_key.objectid, found_key.offset);
 555			ret = 0;	/* ignore the error */
 556		}
 557		if (ret)
 558			goto out;
 559next2:
 560		ret = btrfs_next_item(quota_root, path);
 561		if (ret < 0)
 562			goto out;
 563		if (ret)
 564			break;
 565	}
 566out:
 567	btrfs_free_path(path);
 568	fs_info->qgroup_flags |= flags;
 569	if (ret >= 0) {
 570		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
 571			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 572		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
 573			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 574	} else {
 575		ulist_free(fs_info->qgroup_ulist);
 576		fs_info->qgroup_ulist = NULL;
 577		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 578		btrfs_sysfs_del_qgroups(fs_info);
 579	}
 580
 581	return ret < 0 ? ret : 0;
 582}
 583
 584/*
 585 * Called in close_ctree() when quota is still enabled.  This verifies that
 586 * we don't leak any reserved space.
 587 *
 588 * Return false if no reserved space is left.
 589 * Return true if some reserved space is leaked.
 590 */
 591bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
 592{
 593	struct rb_node *node;
 594	bool ret = false;
 595
 596	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
 597		return ret;
 598	/*
 599	 * Since we're unmounting, there is no race and no need to grab the
 600	 * qgroup lock.  And we don't walk in post-order here, to provide a
 601	 * more user-friendly sorted result.
 602	 */
 603	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
 604		struct btrfs_qgroup *qgroup;
 605		int i;
 606
 607		qgroup = rb_entry(node, struct btrfs_qgroup, node);
 608		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
 609			if (qgroup->rsv.values[i]) {
 610				ret = true;
 611				btrfs_warn(fs_info,
 612		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
 613				   btrfs_qgroup_level(qgroup->qgroupid),
 614				   btrfs_qgroup_subvolid(qgroup->qgroupid),
 615				   i, qgroup->rsv.values[i]);
 616			}
 617		}
 618	}
 619	return ret;
 620}
 621
 622/*
 623 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 624 * the first two of which are single-threaded paths. And for the third one, we
 625 * have set quota_root to be NULL with qgroup_lock held before, so it is safe
 626 * to clean up the in-memory structures without qgroup_lock held.
 627 */
 628void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 629{
 630	struct rb_node *n;
 631	struct btrfs_qgroup *qgroup;
 632
 633	while ((n = rb_first(&fs_info->qgroup_tree))) {
 634		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 635		rb_erase(n, &fs_info->qgroup_tree);
 636		__del_qgroup_rb(fs_info, qgroup);
 637		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
 638		kfree(qgroup);
 639	}
 640	/*
 641	 * We call btrfs_free_qgroup_config() when unmounting
 642	 * filesystem and disabling quota, so we set qgroup_ulist
 643	 * to be null here to avoid double free.
 644	 */
 645	ulist_free(fs_info->qgroup_ulist);
 646	fs_info->qgroup_ulist = NULL;
 647	btrfs_sysfs_del_qgroups(fs_info);
 648}
 649
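/*
 * Add one directed qgroup relation item to the quota tree, keyed as
 * (src, BTRFS_QGROUP_RELATION_KEY, dst).  Relations are stored in both
 * directions, so callers insert a second item with src and dst swapped.
 */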
 650static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
 651				    u64 dst)
 652{
 653	int ret;
 654	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 655	struct btrfs_path *path;
 656	struct btrfs_key key;
 657
 658	path = btrfs_alloc_path();
 659	if (!path)
 660		return -ENOMEM;
 661
 662	key.objectid = src;
 663	key.type = BTRFS_QGROUP_RELATION_KEY;
 664	key.offset = dst;
 665
 666	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
 667
 668	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
 669
 670	btrfs_free_path(path);
 671	return ret;
 672}
 673
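/*
 * Delete the (src, BTRFS_QGROUP_RELATION_KEY, dst) item from the quota tree.
 * Returns -ENOENT if no such item exists.
 */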
 674static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
 675				    u64 dst)
 676{
 677	int ret;
 678	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 679	struct btrfs_path *path;
 680	struct btrfs_key key;
 681
 682	path = btrfs_alloc_path();
 683	if (!path)
 684		return -ENOMEM;
 685
 686	key.objectid = src;
 687	key.type = BTRFS_QGROUP_RELATION_KEY;
 688	key.offset = dst;
 689
 690	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 691	if (ret < 0)
 692		goto out;
 693
 694	if (ret > 0) {
 695		ret = -ENOENT;
 696		goto out;
 697	}
 698
 699	ret = btrfs_del_item(trans, quota_root, path);
 700out:
 701	btrfs_free_path(path);
 702	return ret;
 703}
 704
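/*
 * Create the on-disk state for a new qgroup: a zeroed QGROUP_INFO item and a
 * zeroed QGROUP_LIMIT item, both keyed (0, type, qgroupid) in the quota tree.
 */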
 705static int add_qgroup_item(struct btrfs_trans_handle *trans,
 706			   struct btrfs_root *quota_root, u64 qgroupid)
 707{
 708	int ret;
 709	struct btrfs_path *path;
 710	struct btrfs_qgroup_info_item *qgroup_info;
 711	struct btrfs_qgroup_limit_item *qgroup_limit;
 712	struct extent_buffer *leaf;
 713	struct btrfs_key key;
 714
 715	if (btrfs_is_testing(quota_root->fs_info))
 716		return 0;
 717
 718	path = btrfs_alloc_path();
 719	if (!path)
 720		return -ENOMEM;
 721
 722	key.objectid = 0;
 723	key.type = BTRFS_QGROUP_INFO_KEY;
 724	key.offset = qgroupid;
 725
 726	/*
 727	 * Avoid a transaction abort by catching -EEXIST here. In that
 728	 * case, we proceed by re-initializing the existing structure
 729	 * on disk.
 730	 */
 731
 732	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 733				      sizeof(*qgroup_info));
 734	if (ret && ret != -EEXIST)
 735		goto out;
 736
 737	leaf = path->nodes[0];
 738	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
 739				 struct btrfs_qgroup_info_item);
 740	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
 741	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
 742	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
 743	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
 744	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
 745
 746	btrfs_mark_buffer_dirty(trans, leaf);
 747
 748	btrfs_release_path(path);
 749
 750	key.type = BTRFS_QGROUP_LIMIT_KEY;
 751	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 752				      sizeof(*qgroup_limit));
 753	if (ret && ret != -EEXIST)
 754		goto out;
 755
 756	leaf = path->nodes[0];
 757	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
 758				  struct btrfs_qgroup_limit_item);
 759	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
 760	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
 761	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
 762	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
 763	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
 764
 765	btrfs_mark_buffer_dirty(trans, leaf);
 766
 767	ret = 0;
 768out:
 769	btrfs_free_path(path);
 770	return ret;
 771}
 772
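/* Delete a qgroup's QGROUP_INFO and QGROUP_LIMIT items from the quota tree. */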
 773static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
 774{
 775	int ret;
 776	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 777	struct btrfs_path *path;
 778	struct btrfs_key key;
 779
 780	path = btrfs_alloc_path();
 781	if (!path)
 782		return -ENOMEM;
 783
 784	key.objectid = 0;
 785	key.type = BTRFS_QGROUP_INFO_KEY;
 786	key.offset = qgroupid;
 787	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 788	if (ret < 0)
 789		goto out;
 790
 791	if (ret > 0) {
 792		ret = -ENOENT;
 793		goto out;
 794	}
 795
 796	ret = btrfs_del_item(trans, quota_root, path);
 797	if (ret)
 798		goto out;
 799
 800	btrfs_release_path(path);
 801
 802	key.type = BTRFS_QGROUP_LIMIT_KEY;
 803	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 804	if (ret < 0)
 805		goto out;
 806
 807	if (ret > 0) {
 808		ret = -ENOENT;
 809		goto out;
 810	}
 811
 812	ret = btrfs_del_item(trans, quota_root, path);
 813
 814out:
 815	btrfs_free_path(path);
 816	return ret;
 817}
 818
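/*
 * Write the in-memory limit state of @qgroup (lim_flags and the max/rsv
 * values) back to its QGROUP_LIMIT item in the quota tree.
 */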
 819static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 820				    struct btrfs_qgroup *qgroup)
 821{
 822	struct btrfs_root *quota_root = trans->fs_info->quota_root;
 823	struct btrfs_path *path;
 824	struct btrfs_key key;
 825	struct extent_buffer *l;
 826	struct btrfs_qgroup_limit_item *qgroup_limit;
 827	int ret;
 828	int slot;
 829
 830	key.objectid = 0;
 831	key.type = BTRFS_QGROUP_LIMIT_KEY;
 832	key.offset = qgroup->qgroupid;
 833
 834	path = btrfs_alloc_path();
 835	if (!path)
 836		return -ENOMEM;
 837
 838	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 839	if (ret > 0)
 840		ret = -ENOENT;
 841
 842	if (ret)
 843		goto out;
 844
 845	l = path->nodes[0];
 846	slot = path->slots[0];
 847	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
 848	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
 849	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
 850	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
 851	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
 852	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
 853
 854	btrfs_mark_buffer_dirty(trans, l);
 855
 856out:
 857	btrfs_free_path(path);
 858	return ret;
 859}
 860
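/*
 * Write the in-memory accounting state of @qgroup (the rfer/excl counters)
 * back to its QGROUP_INFO item in the quota tree.
 */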
 861static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 862				   struct btrfs_qgroup *qgroup)
 863{
 864	struct btrfs_fs_info *fs_info = trans->fs_info;
 865	struct btrfs_root *quota_root = fs_info->quota_root;
 866	struct btrfs_path *path;
 867	struct btrfs_key key;
 868	struct extent_buffer *l;
 869	struct btrfs_qgroup_info_item *qgroup_info;
 870	int ret;
 871	int slot;
 872
 873	if (btrfs_is_testing(fs_info))
 874		return 0;
 875
 876	key.objectid = 0;
 877	key.type = BTRFS_QGROUP_INFO_KEY;
 878	key.offset = qgroup->qgroupid;
 879
 880	path = btrfs_alloc_path();
 881	if (!path)
 882		return -ENOMEM;
 883
 884	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 885	if (ret > 0)
 886		ret = -ENOENT;
 887
 888	if (ret)
 889		goto out;
 890
 891	l = path->nodes[0];
 892	slot = path->slots[0];
 893	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
 894	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
 895	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
 896	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
 897	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
 898	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
 899
 900	btrfs_mark_buffer_dirty(trans, l);
 901
 902out:
 903	btrfs_free_path(path);
 904	return ret;
 905}
 906
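/*
 * Sync the qgroup status flags, the current generation and the rescan
 * progress from fs_info into the QGROUP_STATUS item of the quota tree.
 */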
 907static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
 908{
 909	struct btrfs_fs_info *fs_info = trans->fs_info;
 910	struct btrfs_root *quota_root = fs_info->quota_root;
 911	struct btrfs_path *path;
 912	struct btrfs_key key;
 913	struct extent_buffer *l;
 914	struct btrfs_qgroup_status_item *ptr;
 915	int ret;
 916	int slot;
 917
 918	key.objectid = 0;
 919	key.type = BTRFS_QGROUP_STATUS_KEY;
 920	key.offset = 0;
 921
 922	path = btrfs_alloc_path();
 923	if (!path)
 924		return -ENOMEM;
 925
 926	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 927	if (ret > 0)
 928		ret = -ENOENT;
 929
 930	if (ret)
 931		goto out;
 932
 933	l = path->nodes[0];
 934	slot = path->slots[0];
 935	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
 936	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
 937				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
 938	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
 939	btrfs_set_qgroup_status_rescan(l, ptr,
 940				fs_info->qgroup_rescan_progress.objectid);
 941
 942	btrfs_mark_buffer_dirty(trans, l);
 943
 944out:
 945	btrfs_free_path(path);
 946	return ret;
 947}
 948
 949/*
 950 * called with qgroup_lock held
 951 */
 952static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 953				  struct btrfs_root *root)
 954{
 955	struct btrfs_path *path;
 956	struct btrfs_key key;
 957	struct extent_buffer *leaf = NULL;
 958	int ret;
 959	int nr = 0;
 960
 961	path = btrfs_alloc_path();
 962	if (!path)
 963		return -ENOMEM;
 964
 965	key.objectid = 0;
 966	key.offset = 0;
 967	key.type = 0;
 968
 969	while (1) {
 970		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 971		if (ret < 0)
 972			goto out;
 973		leaf = path->nodes[0];
 974		nr = btrfs_header_nritems(leaf);
 975		if (!nr)
 976			break;
  977		/*
  978		 * Delete the leaves one by one,
  979		 * since the whole tree is going
  980		 * to be deleted.
  981		 */
 982		path->slots[0] = 0;
 983		ret = btrfs_del_items(trans, root, path, 0, nr);
 984		if (ret)
 985			goto out;
 986
 987		btrfs_release_path(path);
 988	}
 989	ret = 0;
 990out:
 991	btrfs_free_path(path);
 992	return ret;
 993}
 994
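/*
 * Enable quotas: create the quota root with its QGROUP_STATUS item, add a
 * qgroup item for every existing subvolume plus the top level fs tree, and
 * finally start a full rescan.  Simple quota mode skips the rescan and
 * records the enabling generation instead.
 */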
 995int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
 996		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
 997{
 998	struct btrfs_root *quota_root;
 999	struct btrfs_root *tree_root = fs_info->tree_root;
1000	struct btrfs_path *path = NULL;
1001	struct btrfs_qgroup_status_item *ptr;
1002	struct extent_buffer *leaf;
1003	struct btrfs_key key;
1004	struct btrfs_key found_key;
1005	struct btrfs_qgroup *qgroup = NULL;
1006	struct btrfs_qgroup *prealloc = NULL;
1007	struct btrfs_trans_handle *trans = NULL;
1008	struct ulist *ulist = NULL;
1009	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1010	int ret = 0;
1011	int slot;
1012
1013	/*
1014	 * We need to have subvol_sem write locked, to prevent races between
1015	 * concurrent tasks trying to enable quotas, because we will unlock
1016	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1017	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1018	 */
1019	lockdep_assert_held_write(&fs_info->subvol_sem);
1020
1021	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1022		btrfs_err(fs_info,
1023			  "qgroups are currently unsupported in extent tree v2");
1024		return -EINVAL;
1025	}
1026
1027	mutex_lock(&fs_info->qgroup_ioctl_lock);
1028	if (fs_info->quota_root)
1029		goto out;
1030
1031	ulist = ulist_alloc(GFP_KERNEL);
1032	if (!ulist) {
1033		ret = -ENOMEM;
1034		goto out;
1035	}
1036
1037	ret = btrfs_sysfs_add_qgroups(fs_info);
1038	if (ret < 0)
1039		goto out;
1040
1041	/*
1042	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1043	 * avoid lock acquisition inversion problems (reported by lockdep) between
1044	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1045	 * start a transaction.
1046	 * After we have started the transaction, lock qgroup_ioctl_lock again
1047	 * and check whether someone else created the quota root in the meantime.
1048	 * If so, just return success and release the transaction handle.
1049	 *
1050	 * Also we don't need to worry about someone else calling
1051	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1052	 * that function returns 0 (success) when the sysfs entries already exist.
1053	 */
1054	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1055
1056	/*
1057	 * 1 for quota root item
1058	 * 1 for BTRFS_QGROUP_STATUS item
1059	 *
1060	 * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1061	 * per subvolume. However those are not currently reserved since that
1062	 * would be a lot of overkill.
1063	 */
1064	trans = btrfs_start_transaction(tree_root, 2);
1065
1066	mutex_lock(&fs_info->qgroup_ioctl_lock);
1067	if (IS_ERR(trans)) {
1068		ret = PTR_ERR(trans);
1069		trans = NULL;
1070		goto out;
1071	}
1072
1073	if (fs_info->quota_root)
1074		goto out;
1075
1076	fs_info->qgroup_ulist = ulist;
1077	ulist = NULL;
1078
1079	/*
1080	 * initially create the quota tree
1081	 */
1082	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1083	if (IS_ERR(quota_root)) {
1084		ret = PTR_ERR(quota_root);
1085		btrfs_abort_transaction(trans, ret);
1086		goto out;
1087	}
1088
1089	path = btrfs_alloc_path();
1090	if (!path) {
1091		ret = -ENOMEM;
1092		btrfs_abort_transaction(trans, ret);
1093		goto out_free_root;
1094	}
1095
1096	key.objectid = 0;
1097	key.type = BTRFS_QGROUP_STATUS_KEY;
1098	key.offset = 0;
1099
1100	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1101				      sizeof(*ptr));
1102	if (ret) {
1103		btrfs_abort_transaction(trans, ret);
1104		goto out_free_path;
1105	}
1106
1107	leaf = path->nodes[0];
1108	ptr = btrfs_item_ptr(leaf, path->slots[0],
1109				 struct btrfs_qgroup_status_item);
1110	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1111	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1112	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1113	if (simple) {
1114		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1115		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1116	} else {
1117		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1118	}
1119	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1120				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1121	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1122
1123	btrfs_mark_buffer_dirty(trans, leaf);
1124
1125	key.objectid = 0;
1126	key.type = BTRFS_ROOT_REF_KEY;
1127	key.offset = 0;
1128
1129	btrfs_release_path(path);
1130	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1131	if (ret > 0)
1132		goto out_add_root;
1133	if (ret < 0) {
1134		btrfs_abort_transaction(trans, ret);
1135		goto out_free_path;
1136	}
1137
1138	while (1) {
1139		slot = path->slots[0];
1140		leaf = path->nodes[0];
1141		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1142
1143		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1144
1145			/* Release locks on tree_root before we access quota_root */
1146			btrfs_release_path(path);
1147
1148			/* We should not have a stray @prealloc pointer. */
1149			ASSERT(prealloc == NULL);
1150			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1151			if (!prealloc) {
1152				ret = -ENOMEM;
1153				btrfs_abort_transaction(trans, ret);
1154				goto out_free_path;
1155			}
1156
1157			ret = add_qgroup_item(trans, quota_root,
1158					      found_key.offset);
1159			if (ret) {
1160				btrfs_abort_transaction(trans, ret);
1161				goto out_free_path;
1162			}
1163
1164			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1165			prealloc = NULL;
1166			if (IS_ERR(qgroup)) {
1167				ret = PTR_ERR(qgroup);
1168				btrfs_abort_transaction(trans, ret);
1169				goto out_free_path;
1170			}
1171			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1172			if (ret < 0) {
1173				btrfs_abort_transaction(trans, ret);
1174				goto out_free_path;
1175			}
1176			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1177							 path, 1, 0);
1178			if (ret < 0) {
1179				btrfs_abort_transaction(trans, ret);
1180				goto out_free_path;
1181			}
1182			if (ret > 0) {
1183				/*
1184				 * Shouldn't happen, but in case it does we
1185				 * don't need to do the btrfs_next_item, just
1186				 * continue.
1187				 */
1188				continue;
1189			}
1190		}
1191		ret = btrfs_next_item(tree_root, path);
1192		if (ret < 0) {
1193			btrfs_abort_transaction(trans, ret);
1194			goto out_free_path;
1195		}
1196		if (ret)
1197			break;
1198	}
1199
1200out_add_root:
1201	btrfs_release_path(path);
1202	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1203	if (ret) {
1204		btrfs_abort_transaction(trans, ret);
1205		goto out_free_path;
1206	}
1207
1208	ASSERT(prealloc == NULL);
1209	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1210	if (!prealloc) {
1211		ret = -ENOMEM;
1212		goto out_free_path;
1213	}
1214	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1215	prealloc = NULL;
1216	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1217	if (ret < 0) {
1218		btrfs_abort_transaction(trans, ret);
1219		goto out_free_path;
1220	}
1221
1222	fs_info->qgroup_enable_gen = trans->transid;
1223
1224	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1225	/*
1226	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1227	 * a deadlock with tasks concurrently doing other qgroup operations, such
1228	 * as adding/removing qgroups or adding/deleting qgroup relations,
1229	 * because all qgroup operations first start or join a transaction and then
1230	 * lock the qgroup_ioctl_lock mutex.
1231	 * We are safe from a concurrent task trying to enable quotas, by calling
1232	 * this function, since we are serialized by fs_info->subvol_sem.
1233	 */
1234	ret = btrfs_commit_transaction(trans);
1235	trans = NULL;
1236	mutex_lock(&fs_info->qgroup_ioctl_lock);
1237	if (ret)
1238		goto out_free_path;
1239
1240	/*
1241	 * Set quota enabled flag after committing the transaction, to avoid
1242	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1243	 * creation.
1244	 */
1245	spin_lock(&fs_info->qgroup_lock);
1246	fs_info->quota_root = quota_root;
1247	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1248	if (simple)
1249		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1250	spin_unlock(&fs_info->qgroup_lock);
1251
1252	/* Skip rescan for simple qgroups. */
1253	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1254		goto out_free_path;
1255
1256	ret = qgroup_rescan_init(fs_info, 0, 1);
1257	if (!ret) {
1258		qgroup_rescan_zero_tracking(fs_info);
1259		fs_info->qgroup_rescan_running = true;
1260		btrfs_queue_work(fs_info->qgroup_rescan_workers,
1261				 &fs_info->qgroup_rescan_work);
1262	} else {
1263		/*
1264		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1265		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1266		 * -EINPROGRESS. That can happen because someone started the
1267		 * rescan worker by calling quota rescan ioctl before we
1268		 * attempted to initialize the rescan worker. Failure due to
1269		 * quotas being disabled in the meantime is not possible, because
1270		 * we are holding a write lock on fs_info->subvol_sem, which
1271		 * is also acquired when disabling quotas.
1272		 * Ignore such error, and any other error would need to undo
1273		 * everything we did in the transaction we just committed.
1274		 */
1275		ASSERT(ret == -EINPROGRESS);
1276		ret = 0;
1277	}
1278
1279out_free_path:
1280	btrfs_free_path(path);
1281out_free_root:
1282	if (ret)
1283		btrfs_put_root(quota_root);
1284out:
1285	if (ret) {
1286		ulist_free(fs_info->qgroup_ulist);
1287		fs_info->qgroup_ulist = NULL;
1288		btrfs_sysfs_del_qgroups(fs_info);
1289	}
1290	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1291	if (ret && trans)
1292		btrfs_end_transaction(trans);
1293	else if (trans)
1294		ret = btrfs_end_transaction(trans);
1295	ulist_free(ulist);
1296	kfree(prealloc);
1297	return ret;
1298}
1299
1300/*
1301 * It is possible to have outstanding ordered extents which reserved bytes
1302 * before we disabled. We need to fully flush delalloc, ordered extents, and a
1303 * commit to ensure that we don't leak such reservations, only to have them
1304 * come back if we re-enable.
1305 *
1306 * - enable simple quotas
1307 * - reserve space
1308 * - release it, store rsv_bytes in OE
1309 * - disable quotas
1310 * - enable simple quotas (qgroup rsv are all 0)
1311 * - OE finishes
1312 * - run delayed refs
1313 * - free rsv_bytes, resulting in miscounting or even underflow
1314 */
1315static int flush_reservations(struct btrfs_fs_info *fs_info)
1316{
1317	struct btrfs_trans_handle *trans;
1318	int ret;
1319
1320	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1321	if (ret)
1322		return ret;
1323	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1324	trans = btrfs_join_transaction(fs_info->tree_root);
1325	if (IS_ERR(trans))
1326		return PTR_ERR(trans);
1327	ret = btrfs_commit_transaction(trans);
1328
1329	return ret;
1330}
1331
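/*
 * Disable quotas: stop the rescan worker, flush outstanding reservations,
 * free the in-memory qgroup configuration and delete the quota tree on disk.
 */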
1332int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1333{
1334	struct btrfs_root *quota_root;
1335	struct btrfs_trans_handle *trans = NULL;
1336	int ret = 0;
1337
1338	/*
1339	 * We need to have subvol_sem write locked to prevent races with
1340	 * snapshot creation.
1341	 */
1342	lockdep_assert_held_write(&fs_info->subvol_sem);
1343
1344	/*
1345	 * Lock the cleaner mutex to prevent races with concurrent relocation,
1346	 * because relocation may be building backrefs for blocks of the quota
1347	 * root while we are deleting the root. This is like dropping fs roots
1348	 * of deleted snapshots/subvolumes, we need the same protection.
1349	 *
1350	 * This also prevents races between concurrent tasks trying to disable
1351	 * quotas, because we will unlock and relock qgroup_ioctl_lock across
1352	 * BTRFS_FS_QUOTA_ENABLED changes.
1353	 */
1354	mutex_lock(&fs_info->cleaner_mutex);
1355
1356	mutex_lock(&fs_info->qgroup_ioctl_lock);
1357	if (!fs_info->quota_root)
1358		goto out;
1359
1360	/*
1361	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1362	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1363	 * to lock that mutex while holding a transaction handle and the rescan
1364	 * worker needs to commit a transaction.
1365	 */
1366	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1367
1368	/*
1369	 * Request the qgroup rescan worker to complete and wait for it. This
1370	 * wait must be done before starting the transaction for quota disable,
1371	 * since it may deadlock with the transaction used by the rescan worker.
1372	 */
1373	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1374	btrfs_qgroup_wait_for_completion(fs_info, false);
1375
1376	ret = flush_reservations(fs_info);
1377	if (ret)
1378		goto out_unlock_cleaner;
1379
1380	/*
1381	 * 1 For the root item
1382	 *
1383	 * We should also reserve enough items for the quota tree deletion in
1384	 * btrfs_clean_quota_tree but this is not done.
1385	 *
1386	 * Also, we must always start a transaction without holding the mutex
1387	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1388	 */
1389	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1390
1391	mutex_lock(&fs_info->qgroup_ioctl_lock);
1392	if (IS_ERR(trans)) {
1393		ret = PTR_ERR(trans);
1394		trans = NULL;
1395		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1396		goto out;
1397	}
1398
1399	if (!fs_info->quota_root)
1400		goto out;
1401
1402	spin_lock(&fs_info->qgroup_lock);
1403	quota_root = fs_info->quota_root;
1404	fs_info->quota_root = NULL;
1405	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1406	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1407	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
1408	spin_unlock(&fs_info->qgroup_lock);
1409
1410	btrfs_free_qgroup_config(fs_info);
1411
1412	ret = btrfs_clean_quota_tree(trans, quota_root);
1413	if (ret) {
1414		btrfs_abort_transaction(trans, ret);
1415		goto out;
1416	}
1417
1418	ret = btrfs_del_root(trans, &quota_root->root_key);
1419	if (ret) {
1420		btrfs_abort_transaction(trans, ret);
1421		goto out;
1422	}
1423
1424	spin_lock(&fs_info->trans_lock);
1425	list_del(&quota_root->dirty_list);
1426	spin_unlock(&fs_info->trans_lock);
1427
1428	btrfs_tree_lock(quota_root->node);
1429	btrfs_clear_buffer_dirty(trans, quota_root->node);
1430	btrfs_tree_unlock(quota_root->node);
1431	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1432			      quota_root->node, 0, 1);
1433
1434	btrfs_put_root(quota_root);
1435
1436out:
1437	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1438	if (ret && trans)
1439		btrfs_end_transaction(trans);
1440	else if (trans)
1441		ret = btrfs_commit_transaction(trans);
1442out_unlock_cleaner:
1443	mutex_unlock(&fs_info->cleaner_mutex);
1444
1445	return ret;
1446}
1447
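/* Queue @qgroup on the dirty list unless it is already queued. */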
1448static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1449			 struct btrfs_qgroup *qgroup)
1450{
1451	if (list_empty(&qgroup->dirty))
1452		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1453}
1454
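/*
 * Helpers for walking qgroup hierarchies without allocations: a qgroup can
 * be linked into a temporary list through its iterator member exactly once,
 * and qgroup_iterator_clean() unlinks all entries again after the walk.
 */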
1455static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1456{
1457	if (!list_empty(&qgroup->iterator))
1458		return;
1459
1460	list_add_tail(&qgroup->iterator, head);
1461}
1462
1463static void qgroup_iterator_clean(struct list_head *head)
1464{
1465	while (!list_empty(head)) {
1466		struct btrfs_qgroup *qgroup;
1467
1468		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1469		list_del_init(&qgroup->iterator);
1470	}
1471}
1472
1473/*
1474 * The easy accounting: we're updating a qgroup relationship whose child
1475 * qgroup only has exclusive extents.
1476 *
1477 * In this case, all exclusive extents will also be exclusive for the parent,
1478 * so excl/rfer just get added/removed.
1479 *
1480 * The same goes for qgroup reservation space, which should also be added
1481 * to/removed from the parent.
1482 * Otherwise, when the child releases reservation space, the parent would
1483 * underflow its reservation (in the relation-adding case).
1484 *
1485 * Caller should hold fs_info->qgroup_lock.
1486 */
1487static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1488				    struct btrfs_qgroup *src, int sign)
1489{
1490	struct btrfs_qgroup *qgroup;
1491	struct btrfs_qgroup *cur;
1492	LIST_HEAD(qgroup_list);
1493	u64 num_bytes = src->excl;
1494	int ret = 0;
1495
1496	qgroup = find_qgroup_rb(fs_info, ref_root);
1497	if (!qgroup)
1498		goto out;
1499
1500	qgroup_iterator_add(&qgroup_list, qgroup);
1501	list_for_each_entry(cur, &qgroup_list, iterator) {
1502		struct btrfs_qgroup_list *glist;
1503
1504		cur->rfer += sign * num_bytes;
1505		cur->rfer_cmpr += sign * num_bytes;
1506
1507		WARN_ON(sign < 0 && cur->excl < num_bytes);
1508		cur->excl += sign * num_bytes;
1509		cur->excl_cmpr += sign * num_bytes;
1510
1511		if (sign > 0)
1512			qgroup_rsv_add_by_qgroup(fs_info, cur, src);
1513		else
1514			qgroup_rsv_release_by_qgroup(fs_info, cur, src);
1515		qgroup_dirty(fs_info, cur);
1516
1517		/* Append parent qgroups to @qgroup_list. */
1518		list_for_each_entry(glist, &cur->groups, next_group)
1519			qgroup_iterator_add(&qgroup_list, glist->group);
1520	}
1521	ret = 0;
1522out:
1523	qgroup_iterator_clean(&qgroup_list);
1524	return ret;
1525}
1526
1527
1528/*
1529 * Quick path for updating qgroup with only excl refs.
1530 *
1531 * In that case, just updating all parents will be enough.
1532 * Otherwise we need to do a full rescan.
1533 * Caller should also hold fs_info->qgroup_lock.
1534 *
1535 * Return 0 for a quick update, >0 when a full rescan is needed (the
1536 * INCONSISTENT flag is set in that case).
1537 * Return <0 for other errors.
1538 */
1539static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1540				   u64 src, u64 dst, int sign)
1541{
1542	struct btrfs_qgroup *qgroup;
1543	int ret = 1;
1544	int err = 0;
1545
1546	qgroup = find_qgroup_rb(fs_info, src);
1547	if (!qgroup)
1548		goto out;
1549	if (qgroup->excl == qgroup->rfer) {
1550		ret = 0;
1551		err = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1552		if (err < 0) {
1553			ret = err;
1554			goto out;
1555		}
1556	}
1557out:
1558	if (ret)
1559		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1560	return ret;
1561}
1562
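/*
 * Add a member/parent relation from qgroup @src to qgroup @dst.  @src must
 * be at a lower level than @dst.  This inserts the relation items in both
 * directions, updates the in-memory rbtree and then attempts the quick
 * accounting update.
 */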
1563int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst)
1564{
1565	struct btrfs_fs_info *fs_info = trans->fs_info;
1566	struct btrfs_qgroup *parent;
1567	struct btrfs_qgroup *member;
1568	struct btrfs_qgroup_list *list;
1569	struct btrfs_qgroup_list *prealloc = NULL;
1570	int ret = 0;
1571
1572	/* Check the level of src and dst first */
1573	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1574		return -EINVAL;
1575
1576	mutex_lock(&fs_info->qgroup_ioctl_lock);
1577	if (!fs_info->quota_root) {
1578		ret = -ENOTCONN;
1579		goto out;
1580	}
1581	member = find_qgroup_rb(fs_info, src);
1582	parent = find_qgroup_rb(fs_info, dst);
1583	if (!member || !parent) {
1584		ret = -EINVAL;
1585		goto out;
1586	}
1587
1588	/* Check if such a qgroup relation exists first. */
1589	list_for_each_entry(list, &member->groups, next_group) {
1590		if (list->group == parent) {
1591			ret = -EEXIST;
1592			goto out;
1593		}
1594	}
1595
1596	prealloc = kzalloc(sizeof(*list), GFP_NOFS);
1597	if (!prealloc) {
1598		ret = -ENOMEM;
1599		goto out;
1600	}
1601	ret = add_qgroup_relation_item(trans, src, dst);
1602	if (ret)
1603		goto out;
1604
1605	ret = add_qgroup_relation_item(trans, dst, src);
1606	if (ret) {
1607		del_qgroup_relation_item(trans, src, dst);
1608		goto out;
1609	}
1610
1611	spin_lock(&fs_info->qgroup_lock);
1612	ret = __add_relation_rb(prealloc, member, parent);
1613	prealloc = NULL;
1614	if (ret < 0) {
1615		spin_unlock(&fs_info->qgroup_lock);
1616		goto out;
1617	}
1618	ret = quick_update_accounting(fs_info, src, dst, 1);
1619	spin_unlock(&fs_info->qgroup_lock);
1620out:
1621	kfree(prealloc);
1622	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1623	return ret;
1624}
1625
1626static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1627				 u64 dst)
1628{
1629	struct btrfs_fs_info *fs_info = trans->fs_info;
1630	struct btrfs_qgroup *parent;
1631	struct btrfs_qgroup *member;
1632	struct btrfs_qgroup_list *list;
1633	bool found = false;
1634	int ret = 0;
1635	int ret2;
1636
1637	if (!fs_info->quota_root) {
1638		ret = -ENOTCONN;
1639		goto out;
1640	}
1641
1642	member = find_qgroup_rb(fs_info, src);
1643	parent = find_qgroup_rb(fs_info, dst);
1644	/*
1645	 * If the parent/member pair doesn't exist, just try to delete the
1646	 * dead relation items.
1647	 */
1648	if (!member || !parent)
1649		goto delete_item;
1650
1651	/* Check if such a qgroup relation exists first. */
1652	list_for_each_entry(list, &member->groups, next_group) {
1653		if (list->group == parent) {
1654			found = true;
1655			break;
1656		}
1657	}
1658
1659delete_item:
1660	ret = del_qgroup_relation_item(trans, src, dst);
1661	if (ret < 0 && ret != -ENOENT)
1662		goto out;
1663	ret2 = del_qgroup_relation_item(trans, dst, src);
1664	if (ret2 < 0 && ret2 != -ENOENT)
1665		goto out;
1666
1667	/* At least one deletion succeeded, return 0 */
1668	if (!ret || !ret2)
1669		ret = 0;
1670
1671	if (found) {
1672		spin_lock(&fs_info->qgroup_lock);
1673		del_relation_rb(fs_info, src, dst);
1674		ret = quick_update_accounting(fs_info, src, dst, -1);
1675		spin_unlock(&fs_info->qgroup_lock);
1676	}
1677out:
1678	return ret;
1679}
1680
1681int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1682			      u64 dst)
1683{
1684	struct btrfs_fs_info *fs_info = trans->fs_info;
1685	int ret = 0;
1686
1687	mutex_lock(&fs_info->qgroup_ioctl_lock);
1688	ret = __del_qgroup_relation(trans, src, dst);
1689	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1690
1691	return ret;
1692}
1693
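/*
 * Create a new qgroup: insert its items into the quota tree, add it to the
 * in-memory rbtree and expose it in sysfs.  Fails with -EEXIST if the
 * qgroupid is already in use.
 */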
1694int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1695{
1696	struct btrfs_fs_info *fs_info = trans->fs_info;
1697	struct btrfs_root *quota_root;
1698	struct btrfs_qgroup *qgroup;
1699	struct btrfs_qgroup *prealloc = NULL;
1700	int ret = 0;
1701
1702	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
1703		return 0;
1704
1705	mutex_lock(&fs_info->qgroup_ioctl_lock);
1706	if (!fs_info->quota_root) {
1707		ret = -ENOTCONN;
1708		goto out;
1709	}
1710	quota_root = fs_info->quota_root;
1711	qgroup = find_qgroup_rb(fs_info, qgroupid);
1712	if (qgroup) {
1713		ret = -EEXIST;
1714		goto out;
1715	}
1716
1717	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1718	if (!prealloc) {
1719		ret = -ENOMEM;
1720		goto out;
1721	}
1722
1723	ret = add_qgroup_item(trans, quota_root, qgroupid);
1724	if (ret)
1725		goto out;
1726
1727	spin_lock(&fs_info->qgroup_lock);
1728	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1729	spin_unlock(&fs_info->qgroup_lock);
1730	prealloc = NULL;
1731
1732	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1733out:
1734	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1735	kfree(prealloc);
1736	return ret;
1737}
1738
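/*
 * Report whether a qgroup still tracks any usage: non-zero referenced or
 * exclusive counters, or an outstanding reservation of any type.
 */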
1739static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
1740{
1741	return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
1742		qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
1743		qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
1744		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
1745		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
1746}
1747
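/*
 * Delete a qgroup.  Refused with -EBUSY if a level 0 qgroup still has usage
 * or if the qgroup still has member children; any relations to parent
 * qgroups are dropped as part of the removal.
 */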
1748int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1749{
1750	struct btrfs_fs_info *fs_info = trans->fs_info;
1751	struct btrfs_qgroup *qgroup;
1752	struct btrfs_qgroup_list *list;
1753	int ret = 0;
1754
1755	mutex_lock(&fs_info->qgroup_ioctl_lock);
1756	if (!fs_info->quota_root) {
1757		ret = -ENOTCONN;
1758		goto out;
1759	}
1760
1761	qgroup = find_qgroup_rb(fs_info, qgroupid);
1762	if (!qgroup) {
1763		ret = -ENOENT;
1764		goto out;
1765	}
1766
1767	if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
1768		ret = -EBUSY;
1769		goto out;
1770	}
1771
1772	/* Check if there are no children of this qgroup */
1773	if (!list_empty(&qgroup->members)) {
1774		ret = -EBUSY;
1775		goto out;
1776	}
1777
1778	ret = del_qgroup_item(trans, qgroupid);
1779	if (ret && ret != -ENOENT)
1780		goto out;
1781
1782	while (!list_empty(&qgroup->groups)) {
1783		list = list_first_entry(&qgroup->groups,
1784					struct btrfs_qgroup_list, next_group);
1785		ret = __del_qgroup_relation(trans, qgroupid,
1786					    list->group->qgroupid);
1787		if (ret)
1788			goto out;
1789	}
1790
1791	spin_lock(&fs_info->qgroup_lock);
1792	del_qgroup_rb(fs_info, qgroupid);
1793	spin_unlock(&fs_info->qgroup_lock);
1794
1795	/*
1796	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1797	 * spinlock, since the sysfs_remove_group() function needs to take
1798	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1799	 */
1800	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1801	kfree(qgroup);
1802out:
1803	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1804	return ret;
1805}
1806
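/*
 * Apply the limits in @limit to a qgroup, updating both the in-memory state
 * and the on-disk QGROUP_LIMIT item.  A value of (u64)-1 clears the
 * corresponding limit instead of setting it.
 */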
1807int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1808		       struct btrfs_qgroup_limit *limit)
1809{
1810	struct btrfs_fs_info *fs_info = trans->fs_info;
1811	struct btrfs_qgroup *qgroup;
1812	int ret = 0;
1813	/* Sometimes we would want to clear the limit on this qgroup.
1814	 * To meet this requirement, we treat -1 as a special value
1815	 * which tells the kernel to clear the limit on this qgroup.
1816	 */
1817	const u64 CLEAR_VALUE = -1;
1818
1819	mutex_lock(&fs_info->qgroup_ioctl_lock);
1820	if (!fs_info->quota_root) {
1821		ret = -ENOTCONN;
1822		goto out;
1823	}
1824
1825	qgroup = find_qgroup_rb(fs_info, qgroupid);
1826	if (!qgroup) {
1827		ret = -ENOENT;
1828		goto out;
1829	}
1830
1831	spin_lock(&fs_info->qgroup_lock);
1832	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1833		if (limit->max_rfer == CLEAR_VALUE) {
1834			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1835			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1836			qgroup->max_rfer = 0;
1837		} else {
1838			qgroup->max_rfer = limit->max_rfer;
1839		}
1840	}
1841	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1842		if (limit->max_excl == CLEAR_VALUE) {
1843			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1844			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1845			qgroup->max_excl = 0;
1846		} else {
1847			qgroup->max_excl = limit->max_excl;
1848		}
1849	}
1850	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1851		if (limit->rsv_rfer == CLEAR_VALUE) {
1852			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1853			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1854			qgroup->rsv_rfer = 0;
1855		} else {
1856			qgroup->rsv_rfer = limit->rsv_rfer;
1857		}
1858	}
1859	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1860		if (limit->rsv_excl == CLEAR_VALUE) {
1861			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1862			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1863			qgroup->rsv_excl = 0;
1864		} else {
1865			qgroup->rsv_excl = limit->rsv_excl;
1866		}
1867	}
1868	qgroup->lim_flags |= limit->flags;
1869
1870	spin_unlock(&fs_info->qgroup_lock);
1871
1872	ret = update_qgroup_limit_item(trans, qgroup);
1873	if (ret) {
1874		qgroup_mark_inconsistent(fs_info);
1875		btrfs_info(fs_info, "unable to update quota limit for %llu",
1876		       qgroupid);
1877	}
1878
1879out:
1880	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1881	return ret;
1882}
1883
1884/*
1885 * Inform qgroup to trace one dirty extent, its info is recorded in @record.
1886 * So qgroup can account it at transaction committing time.
1887 *
1888 * No-lock version: the caller must hold the delayed ref lock and have
1889 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
1890 * exiting the lock context.
1891 *
1892 * Return 0 for a successful insert, or >0 if the record already exists,
1893 * in which case the caller can free @record safely. Errors are not possible.
1894 */
1895int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1896				struct btrfs_delayed_ref_root *delayed_refs,
1897				struct btrfs_qgroup_extent_record *record)
1898{
1899	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1900	struct rb_node *parent_node = NULL;
1901	struct btrfs_qgroup_extent_record *entry;
1902	u64 bytenr = record->bytenr;
1903
1904	if (!btrfs_qgroup_full_accounting(fs_info))
1905		return 1;
1906
1907	lockdep_assert_held(&delayed_refs->lock);
1908	trace_btrfs_qgroup_trace_extent(fs_info, record);
1909
1910	while (*p) {
1911		parent_node = *p;
1912		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1913				 node);
1914		if (bytenr < entry->bytenr) {
1915			p = &(*p)->rb_left;
1916		} else if (bytenr > entry->bytenr) {
1917			p = &(*p)->rb_right;
1918		} else {
1919			if (record->data_rsv && !entry->data_rsv) {
1920				entry->data_rsv = record->data_rsv;
1921				entry->data_rsv_refroot =
1922					record->data_rsv_refroot;
1923			}
1924			return 1;
1925		}
1926	}
1927
1928	rb_link_node(&record->node, parent_node, p);
1929	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1930	return 0;
1931}
1932
1933/*
1934 * Post handler after qgroup_trace_extent_nolock().
1935 *
1936 * NOTE: Current qgroup does the expensive backref walk at transaction
1937 * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming
1938 * new transactions.
1939 * This is designed to allow btrfs_find_all_roots() to get a correct
1940 * new_roots result.
1941 *
1942 * However for old_roots there is no need to do the backref walk at that
1943 * time, since we search commit roots to walk backrefs and the result will
1944 * always be correct.
1945 *
1946 * Due to the nature of the no-lock version, we can't do the backref walk
1947 * there, so we must call btrfs_qgroup_trace_extent_post() after exiting
1948 * the spinlock context.
1949 *
1950 * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
1951 * using current root, then we can move all expensive backref walk out of
1952 * transaction committing, but not now as qgroup accounting will be wrong again.
1953 */
1954int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
1955				   struct btrfs_qgroup_extent_record *qrecord)
1956{
1957	struct btrfs_backref_walk_ctx ctx = { 0 };
1958	int ret;
1959
1960	if (!btrfs_qgroup_full_accounting(trans->fs_info))
1961		return 0;
1962	/*
1963	 * We are always called in a context where we are already holding a
1964	 * transaction handle. Often we are called when adding a data delayed
1965	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
1966	 * in which case we will be holding a write lock on extent buffer from a
1967	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
1968	 * acquire fs_info->commit_root_sem, because that is a higher level lock
1969	 * that must be acquired before locking any extent buffers.
1970	 *
1971	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
1972	 * but we can't pass it a non-NULL transaction handle, because otherwise
1973	 * it would not use commit roots and would lock extent buffers, causing
1974	 * a deadlock if it ends up trying to read lock the same extent buffer
1975	 * that was previously write locked at btrfs_truncate_inode_items().
1976	 *
1977	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
1978	 * explicitly tell it to not acquire the commit_root_sem - if we are
1979	 * holding a transaction handle we don't need its protection.
1980	 */
1981	ASSERT(trans != NULL);
1982
1983	if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
1984		return 0;
1985
1986	ctx.bytenr = qrecord->bytenr;
1987	ctx.fs_info = trans->fs_info;
1988
1989	ret = btrfs_find_all_roots(&ctx, true);
1990	if (ret < 0) {
1991		qgroup_mark_inconsistent(trans->fs_info);
1992		btrfs_warn(trans->fs_info,
1993"error accounting new delayed refs extent (err code: %d), quota inconsistent",
1994			ret);
1995		return 0;
1996	}
1997
1998	/*
1999	 * Here we don't need to get the lock of
2000	 * trans->transaction->delayed_refs, since inserted qrecord won't
2001	 * be deleted, only qrecord->node may be modified (new qrecord insert)
2002	 *
2003	 * So modifying qrecord->old_roots is safe here
2004	 */
2005	qrecord->old_roots = ctx.roots;
2006	return 0;
2007}
2008
2009/*
2010 * Inform qgroup to trace one dirty extent, specified by @bytenr and
2011 * @num_bytes.
2012 * So qgroup can account it at transaction commit time.
2013 *
2014 * Better encapsulated version, with memory allocation and backref walk for
2015 * commit roots.
2016 * So this can sleep.
2017 *
2018 * Return 0 if the operation is done.
2019 * Return <0 for errors, like a memory allocation failure or an invalid
2020 * parameter (NULL trans).
2021 */
2022int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2023			      u64 num_bytes)
2024{
2025	struct btrfs_fs_info *fs_info = trans->fs_info;
2026	struct btrfs_qgroup_extent_record *record;
2027	struct btrfs_delayed_ref_root *delayed_refs;
2028	int ret;
2029
2030	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2031		return 0;
2032	record = kzalloc(sizeof(*record), GFP_NOFS);
2033	if (!record)
2034		return -ENOMEM;
2035
2036	delayed_refs = &trans->transaction->delayed_refs;
2037	record->bytenr = bytenr;
2038	record->num_bytes = num_bytes;
2039	record->old_roots = NULL;
2040
2041	spin_lock(&delayed_refs->lock);
2042	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
2043	spin_unlock(&delayed_refs->lock);
2044	if (ret > 0) {
2045		kfree(record);
2046		return 0;
2047	}
2048	return btrfs_qgroup_trace_extent_post(trans, record);
2049}
2050
2051/*
2052 * Inform qgroup to trace all the data leaf items.
2053 *
2054 * Return 0 for success.
2055 * Return <0 for errors (ENOMEM).
2056 */
2057int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2058				  struct extent_buffer *eb)
2059{
2060	struct btrfs_fs_info *fs_info = trans->fs_info;
2061	int nr = btrfs_header_nritems(eb);
2062	int i, extent_type, ret;
2063	struct btrfs_key key;
2064	struct btrfs_file_extent_item *fi;
2065	u64 bytenr, num_bytes;
2066
2067	/* We can be called directly from walk_up_proc() */
2068	if (!btrfs_qgroup_full_accounting(fs_info))
2069		return 0;
2070
2071	for (i = 0; i < nr; i++) {
2072		btrfs_item_key_to_cpu(eb, &key, i);
2073
2074		if (key.type != BTRFS_EXTENT_DATA_KEY)
2075			continue;
2076
2077		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2078		/* Filter out non qgroup-accountable extents. */
2079		extent_type = btrfs_file_extent_type(eb, fi);
2080
2081		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2082			continue;
2083
2084		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2085		if (!bytenr)
2086			continue;
2087
2088		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2089
2090		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2091		if (ret)
2092			return ret;
2093	}
2094	cond_resched();
2095	return 0;
2096}
2097
2098/*
2099 * Walk up the tree from the bottom, freeing leaves and any interior
2100 * nodes which have had all slots visited. If a node (leaf or
2101 * interior) is freed, the node above it will have its slot
2102 * incremented. The root node will never be freed.
2103 *
2104 * At the end of this function, we should have a path which has all
2105 * slots incremented to the next position for a search. If we need to
2106 * read a new node it will be NULL and the node above it will have the
2107 * correct slot selected for a later read.
2108 *
2109 * If we increment the root node's slot counter past the number of
2110 * elements, 1 is returned to signal completion of the search.
2111 */
2112static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2113{
2114	int level = 0;
2115	int nr, slot;
2116	struct extent_buffer *eb;
2117
2118	if (root_level == 0)
2119		return 1;
2120
2121	while (level <= root_level) {
2122		eb = path->nodes[level];
2123		nr = btrfs_header_nritems(eb);
2124		path->slots[level]++;
2125		slot = path->slots[level];
2126		if (slot >= nr || level == 0) {
2127			/*
2128			 * Don't free the root -  we will detect this
2129			 * condition after our loop and return a
2130			 * positive value for caller to stop walking the tree.
2131			 */
2132			if (level != root_level) {
2133				btrfs_tree_unlock_rw(eb, path->locks[level]);
2134				path->locks[level] = 0;
2135
2136				free_extent_buffer(eb);
2137				path->nodes[level] = NULL;
2138				path->slots[level] = 0;
2139			}
2140		} else {
2141			/*
2142			 * We have a valid slot to walk back down
2143			 * from. Stop here so caller can process these
2144			 * new nodes.
2145			 */
2146			break;
2147		}
2148
2149		level++;
2150	}
2151
2152	eb = path->nodes[root_level];
2153	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2154		return 1;
2155
2156	return 0;
2157}
2158
2159/*
2160 * Helper function to trace a subtree tree block swap.
2161 *
2162 * The swap will happen in highest tree block, but there may be a lot of
2163 * tree blocks involved.
2164 *
2165 * For example:
2166 *  OO = Old tree blocks
2167 *  NN = New tree blocks allocated during balance
2168 *
2169 *           File tree (257)                  Reloc tree for 257
2170 * L2              OO                                NN
2171 *               /    \                            /    \
2172 * L1          OO      OO (a)                    OO      NN (a)
2173 *            / \     / \                       / \     / \
2174 * L0       OO   OO OO   OO                   OO   OO NN   NN
2175 *                  (b)  (c)                          (b)  (c)
2176 *
2177 * When calling qgroup_trace_extent_swap(), we will pass:
2178 * @src_eb = OO(a)
2179 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2180 * @dst_level = 0
2181 * @root_level = 1
2182 *
2183 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2184 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2185 *
2186 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2187 *
2188 * 1) Tree search from @src_eb
2189 *    It should act as a simplified btrfs_search_slot().
2190 *    The key for search can be extracted from @dst_path->nodes[dst_level]
2191 *    (first key).
2192 *
2193 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2194 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2195 *    They should be marked during previous (@dst_level = 1) iteration.
2196 *
2197 * 3) Mark file extents in leaves dirty
2198 *    We don't have a good way to pick out new file extents only.
2199 *    So we still follow the old method of scanning all file extents in
2200 *    the leaf.
2201 *
2202 * This function can free us from keeping two paths, thus later we only need
2203 * to care about how to iterate all new tree blocks in reloc tree.
2204 */
2205static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
2206				    struct extent_buffer *src_eb,
2207				    struct btrfs_path *dst_path,
2208				    int dst_level, int root_level,
2209				    bool trace_leaf)
2210{
2211	struct btrfs_key key;
2212	struct btrfs_path *src_path;
2213	struct btrfs_fs_info *fs_info = trans->fs_info;
2214	u32 nodesize = fs_info->nodesize;
2215	int cur_level = root_level;
2216	int ret;
2217
2218	BUG_ON(dst_level > root_level);
2219	/* Level mismatch */
2220	if (btrfs_header_level(src_eb) != root_level)
2221		return -EINVAL;
2222
2223	src_path = btrfs_alloc_path();
2224	if (!src_path) {
2225		ret = -ENOMEM;
2226		goto out;
2227	}
2228
2229	if (dst_level)
2230		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2231	else
2232		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2233
2234	/* For src_path */
2235	atomic_inc(&src_eb->refs);
2236	src_path->nodes[root_level] = src_eb;
2237	src_path->slots[root_level] = dst_path->slots[root_level];
2238	src_path->locks[root_level] = 0;
2239
2240	/* A simplified version of btrfs_search_slot() */
2241	while (cur_level >= dst_level) {
2242		struct btrfs_key src_key;
2243		struct btrfs_key dst_key;
2244
2245		if (src_path->nodes[cur_level] == NULL) {
2246			struct extent_buffer *eb;
2247			int parent_slot;
2248
2249			eb = src_path->nodes[cur_level + 1];
2250			parent_slot = src_path->slots[cur_level + 1];
2251
2252			eb = btrfs_read_node_slot(eb, parent_slot);
2253			if (IS_ERR(eb)) {
2254				ret = PTR_ERR(eb);
2255				goto out;
2256			}
2257
2258			src_path->nodes[cur_level] = eb;
2259
2260			btrfs_tree_read_lock(eb);
2261			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2262		}
2263
2264		src_path->slots[cur_level] = dst_path->slots[cur_level];
2265		if (cur_level) {
2266			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2267					&dst_key, dst_path->slots[cur_level]);
2268			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2269					&src_key, src_path->slots[cur_level]);
2270		} else {
2271			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2272					&dst_key, dst_path->slots[cur_level]);
2273			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2274					&src_key, src_path->slots[cur_level]);
2275		}
2276		/* Content mismatch, something went wrong */
2277		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2278			ret = -ENOENT;
2279			goto out;
2280		}
2281		cur_level--;
2282	}
2283
2284	/*
2285	 * Now both @dst_path and @src_path have been populated, record the tree
2286	 * blocks for qgroup accounting.
2287	 */
2288	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2289					nodesize);
2290	if (ret < 0)
2291		goto out;
2292	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2293					nodesize);
2294	if (ret < 0)
2295		goto out;
2296
2297	/* Record leaf file extents */
2298	if (dst_level == 0 && trace_leaf) {
2299		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2300		if (ret < 0)
2301			goto out;
2302		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2303	}
2304out:
2305	btrfs_free_path(src_path);
2306	return ret;
2307}
2308
2309/*
2310 * Helper function to do recursive generation-aware depth-first search, to
2311 * locate all new tree blocks in a subtree of reloc tree.
2312 *
2313 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2314 *         reloc tree
2315 * L2         NN (a)
2316 *          /    \
2317 * L1    OO        NN (b)
2318 *      /  \      /  \
2319 * L0  OO  OO    OO  NN
2320 *               (c) (d)
2321 * If we pass:
2322 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2323 * @cur_level = 1
2324 * @root_level = 1
2325 *
2326 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2327 * trace the above tree blocks along with their counterparts in the file
2328 * tree. During the search, old tree block OO(c) will be skipped as the
2329 * tree block swap won't affect it.
2330 */
2331static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2332					   struct extent_buffer *src_eb,
2333					   struct btrfs_path *dst_path,
2334					   int cur_level, int root_level,
2335					   u64 last_snapshot, bool trace_leaf)
2336{
2337	struct btrfs_fs_info *fs_info = trans->fs_info;
2338	struct extent_buffer *eb;
2339	bool need_cleanup = false;
2340	int ret = 0;
2341	int i;
2342
2343	/* Level sanity check */
2344	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2345	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2346	    root_level < cur_level) {
2347		btrfs_err_rl(fs_info,
2348			"%s: bad levels, cur_level=%d root_level=%d",
2349			__func__, cur_level, root_level);
2350		return -EUCLEAN;
2351	}
2352
2353	/* Read the tree block if needed */
2354	if (dst_path->nodes[cur_level] == NULL) {
2355		int parent_slot;
2356		u64 child_gen;
2357
2358		/*
2359		 * dst_path->nodes[root_level] must be initialized before
2360		 * calling this function.
2361		 */
2362		if (cur_level == root_level) {
2363			btrfs_err_rl(fs_info,
2364	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2365				__func__, root_level, root_level, cur_level);
2366			return -EUCLEAN;
2367		}
2368
2369		/*
2370		 * We need to get child blockptr/gen from parent before we can
2371		 * read it.
2372		 */
2373		eb = dst_path->nodes[cur_level + 1];
2374		parent_slot = dst_path->slots[cur_level + 1];
2375		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2376
2377		/* This node is old, no need to trace */
2378		if (child_gen < last_snapshot)
2379			goto out;
2380
2381		eb = btrfs_read_node_slot(eb, parent_slot);
2382		if (IS_ERR(eb)) {
2383			ret = PTR_ERR(eb);
2384			goto out;
2385		}
2386
2387		dst_path->nodes[cur_level] = eb;
2388		dst_path->slots[cur_level] = 0;
2389
2390		btrfs_tree_read_lock(eb);
2391		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2392		need_cleanup = true;
2393	}
2394
2395	/* Now record this tree block and its counterpart for qgroups */
2396	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2397				       root_level, trace_leaf);
2398	if (ret < 0)
2399		goto cleanup;
2400
2401	eb = dst_path->nodes[cur_level];
2402
2403	if (cur_level > 0) {
2404		/* Iterate all child tree blocks */
2405		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2406			/* Skip old tree blocks as they won't be swapped */
2407			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2408				continue;
2409			dst_path->slots[cur_level] = i;
2410
2411			/* Recursive call (at most 7 times) */
2412			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2413					dst_path, cur_level - 1, root_level,
2414					last_snapshot, trace_leaf);
2415			if (ret < 0)
2416				goto cleanup;
2417		}
2418	}
2419
2420cleanup:
2421	if (need_cleanup) {
2422		/* Clean up */
2423		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2424				     dst_path->locks[cur_level]);
2425		free_extent_buffer(dst_path->nodes[cur_level]);
2426		dst_path->nodes[cur_level] = NULL;
2427		dst_path->slots[cur_level] = 0;
2428		dst_path->locks[cur_level] = 0;
2429	}
2430out:
2431	return ret;
2432}
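
/*
 * Editor's illustration (a sketch, not part of the original source): a
 * possible call trace for the example tree in the comment above, starting
 * at NN(b) with cur_level == root_level == 1:
 *
 *	qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, 1, 1, ...)
 *	  qgroup_trace_extent_swap()	// traces NN(b) and its counterpart
 *	  slot of OO(c): gen < last_snapshot, skipped
 *	  slot of NN(d):
 *	    qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, 0, 1, ...)
 *	      qgroup_trace_extent_swap()	// traces NN(d), its counterpart
 *						// and leaf items if trace_leaf
 */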
2433
2434static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2435				struct extent_buffer *src_eb,
2436				struct extent_buffer *dst_eb,
2437				u64 last_snapshot, bool trace_leaf)
2438{
2439	struct btrfs_fs_info *fs_info = trans->fs_info;
2440	struct btrfs_path *dst_path = NULL;
2441	int level;
2442	int ret;
2443
2444	if (!btrfs_qgroup_full_accounting(fs_info))
2445		return 0;
2446
2447	/* Wrong parameter order */
2448	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2449		btrfs_err_rl(fs_info,
2450		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2451			     btrfs_header_generation(src_eb),
2452			     btrfs_header_generation(dst_eb));
2453		return -EUCLEAN;
2454	}
2455
2456	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2457		ret = -EIO;
2458		goto out;
2459	}
2460
2461	level = btrfs_header_level(dst_eb);
2462	dst_path = btrfs_alloc_path();
2463	if (!dst_path) {
2464		ret = -ENOMEM;
2465		goto out;
2466	}
2467	/* For dst_path */
2468	atomic_inc(&dst_eb->refs);
2469	dst_path->nodes[level] = dst_eb;
2470	dst_path->slots[level] = 0;
2471	dst_path->locks[level] = 0;
2472
2473	/* Do the generation-aware depth-first search */
2474	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2475					      level, last_snapshot, trace_leaf);
2476	if (ret < 0)
2477		goto out;
2478	ret = 0;
2479
2480out:
2481	btrfs_free_path(dst_path);
2482	if (ret < 0)
2483		qgroup_mark_inconsistent(fs_info);
2484	return ret;
2485}
2486
2487/*
2488 * Inform qgroup to trace a whole subtree, including all its child tree
2489 * blocks and data.
2490 * The root tree block is specified by @root_eb.
2491 *
2492 * Normally used by relocation (tree block swap) and subvolume deletion.
2493 *
2494 * Return 0 for success
2495 * Return <0 for error (ENOMEM or tree search error)
2496 */
2497int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2498			       struct extent_buffer *root_eb,
2499			       u64 root_gen, int root_level)
2500{
2501	struct btrfs_fs_info *fs_info = trans->fs_info;
2502	int ret = 0;
2503	int level;
2504	u8 drop_subptree_thres;
2505	struct extent_buffer *eb = root_eb;
2506	struct btrfs_path *path = NULL;
2507
2508	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2509	BUG_ON(root_eb == NULL);
2510
2511	if (!btrfs_qgroup_full_accounting(fs_info))
2512		return 0;
2513
2514	spin_lock(&fs_info->qgroup_lock);
2515	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2516	spin_unlock(&fs_info->qgroup_lock);
2517
2518	/*
2519	 * This function only gets called for snapshot drop; if we hit a high
2520	 * node here, it means we are going to change ownership for quite a lot
2521	 * of extents, which will greatly slow down btrfs_commit_transaction().
2522	 *
2523	 * So if we find a high tree here, we just skip the accounting and
2524	 * mark the qgroup inconsistent.
2525	 */
2526	if (root_level >= drop_subptree_thres) {
2527		qgroup_mark_inconsistent(fs_info);
2528		return 0;
2529	}
2530
2531	if (!extent_buffer_uptodate(root_eb)) {
2532		struct btrfs_tree_parent_check check = {
2533			.has_first_key = false,
2534			.transid = root_gen,
2535			.level = root_level
2536		};
2537
2538		ret = btrfs_read_extent_buffer(root_eb, &check);
2539		if (ret)
2540			goto out;
2541	}
2542
2543	if (root_level == 0) {
2544		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2545		goto out;
2546	}
2547
2548	path = btrfs_alloc_path();
2549	if (!path)
2550		return -ENOMEM;
2551
2552	/*
2553	 * Walk down the tree.  Missing extent blocks are filled in as
2554	 * we go. Metadata is accounted every time we read a new
2555	 * extent block.
2556	 *
2557	 * When we reach a leaf, we account for file extent items in it,
2558	 * walk back up the tree (adjusting slot pointers as we go)
2559	 * and restart the search process.
2560	 */
2561	atomic_inc(&root_eb->refs);	/* For path */
2562	path->nodes[root_level] = root_eb;
2563	path->slots[root_level] = 0;
2564	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2565walk_down:
2566	level = root_level;
2567	while (level >= 0) {
2568		if (path->nodes[level] == NULL) {
2569			int parent_slot;
2570			u64 child_bytenr;
2571
2572			/*
2573			 * We need to get child blockptr from parent before we
2574			 * can read it.
2575			 */
2576			eb = path->nodes[level + 1];
2577			parent_slot = path->slots[level + 1];
2578			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2579
2580			eb = btrfs_read_node_slot(eb, parent_slot);
2581			if (IS_ERR(eb)) {
2582				ret = PTR_ERR(eb);
2583				goto out;
2584			}
2585
2586			path->nodes[level] = eb;
2587			path->slots[level] = 0;
2588
2589			btrfs_tree_read_lock(eb);
2590			path->locks[level] = BTRFS_READ_LOCK;
2591
2592			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2593							fs_info->nodesize);
2594			if (ret)
2595				goto out;
2596		}
2597
2598		if (level == 0) {
2599			ret = btrfs_qgroup_trace_leaf_items(trans,
2600							    path->nodes[level]);
2601			if (ret)
2602				goto out;
2603
2604			/* Nonzero return here means we completed our search */
2605			ret = adjust_slots_upwards(path, root_level);
2606			if (ret)
2607				break;
2608
2609			/* Restart search with new slots */
2610			goto walk_down;
2611		}
2612
2613		level--;
2614	}
2615
2616	ret = 0;
2617out:
2618	btrfs_free_path(path);
2619
2620	return ret;
2621}
2622
2623static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2624{
2625	if (!list_empty(&qgroup->nested_iterator))
2626		return;
2627
2628	list_add_tail(&qgroup->nested_iterator, head);
2629}
2630
2631static void qgroup_iterator_nested_clean(struct list_head *head)
2632{
2633	while (!list_empty(head)) {
2634		struct btrfs_qgroup *qgroup;
2635
2636		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2637		list_del_init(&qgroup->nested_iterator);
2638	}
2639}
2640
2641#define UPDATE_NEW	0
2642#define UPDATE_OLD	1
2643/*
2644 * Walk all of the roots that point to the bytenr and adjust their refcnts.
2645 */
2646static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2647				 struct ulist *roots, struct list_head *qgroups,
2648				 u64 seq, int update_old)
2649{
2650	struct ulist_node *unode;
2651	struct ulist_iterator uiter;
2652	struct btrfs_qgroup *qg;
2653
2654	if (!roots)
2655		return;
2656	ULIST_ITER_INIT(&uiter);
2657	while ((unode = ulist_next(roots, &uiter))) {
2658		LIST_HEAD(tmp);
2659
2660		qg = find_qgroup_rb(fs_info, unode->val);
2661		if (!qg)
2662			continue;
2663
2664		qgroup_iterator_nested_add(qgroups, qg);
2665		qgroup_iterator_add(&tmp, qg);
2666		list_for_each_entry(qg, &tmp, iterator) {
2667			struct btrfs_qgroup_list *glist;
2668
2669			if (update_old)
2670				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2671			else
2672				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2673
2674			list_for_each_entry(glist, &qg->groups, next_group) {
2675				qgroup_iterator_nested_add(qgroups, glist->group);
2676				qgroup_iterator_add(&tmp, glist->group);
2677			}
2678		}
2679		qgroup_iterator_clean(&tmp);
2680	}
2681}
2682
2683/*
2684 * Update qgroup rfer/excl counters.
2685 * Rfer update is easy, the code explains itself.
2686 *
2687 * Excl update is tricky, the update is split into 2 parts.
2688 * Part 1: Possible exclusive <-> sharing detect:
2689 *	|	A	|	!A	|
2690 *  -------------------------------------
2691 *  B	|	*	|	-	|
2692 *  -------------------------------------
2693 *  !B	|	+	|	**	|
2694 *  -------------------------------------
2695 *
2696 * Conditions:
2697 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2698 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2699 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2700 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2701 *
2702 * Results:
2703 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2704 * *: Definitely not changed.		**: Possible unchanged.
2705 *
2706 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2707 *
2708 * To make the logic clear, we first use conditions A and B to split the
2709 * combinations into 4 results.
2710 *
2711 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2712 * those results only one variant may be 0.
2713 *
2714 * Lastly, check result **: since there are 2 variants that may be 0, split
2715 * it again (2x2).
2716 * But this time we don't need to consider other things; the code and logic
2717 * are easy to understand now.
2718 */
2719static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2720				   struct list_head *qgroups, u64 nr_old_roots,
2721				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2722{
2723	struct btrfs_qgroup *qg;
2724
2725	list_for_each_entry(qg, qgroups, nested_iterator) {
2726		u64 cur_new_count, cur_old_count;
2727		bool dirty = false;
2728
2729		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2730		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2731
2732		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2733					     cur_new_count);
2734
2735		/* Rfer update part */
2736		if (cur_old_count == 0 && cur_new_count > 0) {
2737			qg->rfer += num_bytes;
2738			qg->rfer_cmpr += num_bytes;
2739			dirty = true;
2740		}
2741		if (cur_old_count > 0 && cur_new_count == 0) {
2742			qg->rfer -= num_bytes;
2743			qg->rfer_cmpr -= num_bytes;
2744			dirty = true;
2745		}
2746
2747		/* Excl update part */
2748		/* Exclusive/none -> shared case */
2749		if (cur_old_count == nr_old_roots &&
2750		    cur_new_count < nr_new_roots) {
2751			/* Exclusive -> shared */
2752			if (cur_old_count != 0) {
2753				qg->excl -= num_bytes;
2754				qg->excl_cmpr -= num_bytes;
2755				dirty = true;
2756			}
2757		}
2758
2759		/* Shared -> exclusive/none case */
2760		if (cur_old_count < nr_old_roots &&
2761		    cur_new_count == nr_new_roots) {
2762			/* Shared->exclusive */
2763			if (cur_new_count != 0) {
2764				qg->excl += num_bytes;
2765				qg->excl_cmpr += num_bytes;
2766				dirty = true;
2767			}
2768		}
2769
2770		/* Exclusive/none -> exclusive/none case */
2771		if (cur_old_count == nr_old_roots &&
2772		    cur_new_count == nr_new_roots) {
2773			if (cur_old_count == 0) {
2774				/* None -> exclusive/none */
2775
2776				if (cur_new_count != 0) {
2777					/* None -> exclusive */
2778					qg->excl += num_bytes;
2779					qg->excl_cmpr += num_bytes;
2780					dirty = true;
2781				}
2782				/* None -> none, nothing changed */
2783			} else {
2784				/* Exclusive -> exclusive/none */
2785
2786				if (cur_new_count == 0) {
2787					/* Exclusive -> none */
2788					qg->excl -= num_bytes;
2789					qg->excl_cmpr -= num_bytes;
2790					dirty = true;
2791				}
2792				/* Exclusive -> exclusive, nothing changed */
2793			}
2794		}
2795
2796		if (dirty)
2797			qgroup_dirty(fs_info, qg);
2798	}
2799}
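
/*
 * Editor's worked example (hypothetical numbers, not from the original
 * source): a 16K extent referenced by subvolumes {257, 258} before an
 * operation and only by {257} afterwards, so nr_old_roots = 2 and
 * nr_new_roots = 1.
 *
 * Qgroup 0/257: cur_old_count = 1, cur_new_count = 1
 *	rfer: both counts nonzero			-> unchanged
 *	excl: cur_old < nr_old, cur_new == nr_new != 0	-> excl += 16K
 *	      (shared -> exclusive)
 *
 * Qgroup 0/258: cur_old_count = 1, cur_new_count = 0
 *	rfer: cur_old > 0, cur_new == 0			-> rfer -= 16K
 *	excl: cur_old < nr_old, cur_new < nr_new	-> unchanged
 *	      (was shared, never exclusive to 258)
 */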
2800
2801/*
2802 * Check if @roots is potentially a list of fs tree roots
2803 *
2804 * Return 0 for definitely not a fs/subvol tree roots ulist
2805 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2806 *          one as well)
2807 */
2808static int maybe_fs_roots(struct ulist *roots)
2809{
2810	struct ulist_node *unode;
2811	struct ulist_iterator uiter;
2812
2813	/* Empty one, still possible for fs roots */
2814	if (!roots || roots->nnodes == 0)
2815		return 1;
2816
2817	ULIST_ITER_INIT(&uiter);
2818	unode = ulist_next(roots, &uiter);
2819	if (!unode)
2820		return 1;
2821
2822	/*
2823	 * If it contains fs tree roots, then it must belong to fs/subvol
2824	 * trees.
2825	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2826	 */
2827	return is_fstree(unode->val);
2828}
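
/*
 * Editor's example (illustrative): a ulist containing root id 5
 * (BTRFS_FS_TREE_OBJECTID) or 257 (a subvolume) makes maybe_fs_roots()
 * return 1, while one containing root id 2 (the extent tree) returns 0,
 * since an extent is never shared between fs and non-fs trees.
 */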
2829
2830int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2831				u64 num_bytes, struct ulist *old_roots,
2832				struct ulist *new_roots)
2833{
2834	struct btrfs_fs_info *fs_info = trans->fs_info;
2835	LIST_HEAD(qgroups);
2836	u64 seq;
2837	u64 nr_new_roots = 0;
2838	u64 nr_old_roots = 0;
2839	int ret = 0;
2840
2841	/*
2842	 * If quotas get disabled meanwhile, the resources need to be freed and
2843	 * we can't just exit here.
2844	 */
2845	if (!btrfs_qgroup_full_accounting(fs_info) ||
2846	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2847		goto out_free;
2848
2849	if (new_roots) {
2850		if (!maybe_fs_roots(new_roots))
2851			goto out_free;
2852		nr_new_roots = new_roots->nnodes;
2853	}
2854	if (old_roots) {
2855		if (!maybe_fs_roots(old_roots))
2856			goto out_free;
2857		nr_old_roots = old_roots->nnodes;
2858	}
2859
2860	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2861	if (nr_old_roots == 0 && nr_new_roots == 0)
2862		goto out_free;
2863
2864	BUG_ON(!fs_info->quota_root);
2865
2866	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2867					num_bytes, nr_old_roots, nr_new_roots);
2868
2869	mutex_lock(&fs_info->qgroup_rescan_lock);
2870	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2871		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2872			mutex_unlock(&fs_info->qgroup_rescan_lock);
2873			ret = 0;
2874			goto out_free;
2875		}
2876	}
2877	mutex_unlock(&fs_info->qgroup_rescan_lock);
2878
2879	spin_lock(&fs_info->qgroup_lock);
2880	seq = fs_info->qgroup_seq;
2881
2882	/* Update old refcnts using old_roots */
2883	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2884
2885	/* Update new refcnts using new_roots */
2886	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2887
2888	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2889			       num_bytes, seq);
2890
2891	/*
2892	 * We're done using the iterator, release all its qgroups while holding
2893	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
2894	 * and trigger use-after-free accesses to qgroups.
2895	 */
2896	qgroup_iterator_nested_clean(&qgroups);
2897
2898	/*
2899	 * Bump qgroup_seq to avoid seq overlap
2900	 */
2901	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2902	spin_unlock(&fs_info->qgroup_lock);
2903out_free:
2904	ulist_free(old_roots);
2905	ulist_free(new_roots);
2906	return ret;
2907}
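
/*
 * Editor's usage sketch (illustrative): btrfs_qgroup_account_extent()
 * consumes @old_roots and @new_roots on every path (ulist_free() at
 * out_free), so a caller hands over ownership and must not reuse the lists:
 *
 *	ret = btrfs_qgroup_account_extent(trans, bytenr, num_bytes,
 *					  old_roots, new_roots);
 *	old_roots = NULL;
 *	new_roots = NULL;
 */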
2908
2909int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2910{
2911	struct btrfs_fs_info *fs_info = trans->fs_info;
2912	struct btrfs_qgroup_extent_record *record;
2913	struct btrfs_delayed_ref_root *delayed_refs;
2914	struct ulist *new_roots = NULL;
2915	struct rb_node *node;
2916	u64 num_dirty_extents = 0;
2917	u64 qgroup_to_skip;
2918	int ret = 0;
2919
2920	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
2921		return 0;
2922
2923	delayed_refs = &trans->transaction->delayed_refs;
2924	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2925	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2926		record = rb_entry(node, struct btrfs_qgroup_extent_record,
2927				  node);
2928
2929		num_dirty_extents++;
2930		trace_btrfs_qgroup_account_extents(fs_info, record);
2931
2932		if (!ret && !(fs_info->qgroup_flags &
2933			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
2934			struct btrfs_backref_walk_ctx ctx = { 0 };
2935
2936			ctx.bytenr = record->bytenr;
2937			ctx.fs_info = fs_info;
2938
2939			/*
2940			 * Old roots should be searched when inserting qgroup
2941			 * extent record.
2942			 *
2943			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
2944			 * we may have some record inserted during
2945			 * NO_ACCOUNTING (thus no old_roots populated), but
2946			 * later we start rescan, which clears NO_ACCOUNTING,
2947			 * leaving some inserted records without old_roots
2948			 * populated.
2949			 *
2950			 * Those cases are rare and should not cause too much
2951			 * time spent during commit_transaction().
2952			 */
2953			if (!record->old_roots) {
2954				/* Search commit root to find old_roots */
2955				ret = btrfs_find_all_roots(&ctx, false);
2956				if (ret < 0)
2957					goto cleanup;
2958				record->old_roots = ctx.roots;
2959				ctx.roots = NULL;
2960			}
2961
2962			/* Free the reserved data space */
2963			btrfs_qgroup_free_refroot(fs_info,
2964					record->data_rsv_refroot,
2965					record->data_rsv,
2966					BTRFS_QGROUP_RSV_DATA);
2967			/*
2968			 * Use BTRFS_SEQ_LAST as time_seq to do a special search
2969			 * which doesn't lock the tree or delayed_refs, and
2970			 * searches the current root. It's safe inside commit_transaction().
2971			 */
2972			ctx.trans = trans;
2973			ctx.time_seq = BTRFS_SEQ_LAST;
2974			ret = btrfs_find_all_roots(&ctx, false);
2975			if (ret < 0)
2976				goto cleanup;
2977			new_roots = ctx.roots;
2978			if (qgroup_to_skip) {
2979				ulist_del(new_roots, qgroup_to_skip, 0);
2980				ulist_del(record->old_roots, qgroup_to_skip,
2981					  0);
2982			}
2983			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2984							  record->num_bytes,
2985							  record->old_roots,
2986							  new_roots);
2987			record->old_roots = NULL;
2988			new_roots = NULL;
2989		}
2990cleanup:
2991		ulist_free(record->old_roots);
2992		ulist_free(new_roots);
2993		new_roots = NULL;
2994		rb_erase(node, &delayed_refs->dirty_extent_root);
2995		kfree(record);
2996
2997	}
2998	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2999				       num_dirty_extents);
3000	return ret;
3001}
3002
3003/*
3004 * Writes all changed qgroups to disk.
3005 * Called by the transaction commit path and the qgroup assign ioctl.
3006 */
3007int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3008{
3009	struct btrfs_fs_info *fs_info = trans->fs_info;
3010	int ret = 0;
3011
3012	/*
3013	 * In case we are called from the qgroup assign ioctl, assert that we
3014	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3015	 * disable operation (ioctl) and access a freed quota root.
3016	 */
3017	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3018		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3019
3020	if (!fs_info->quota_root)
3021		return ret;
3022
3023	spin_lock(&fs_info->qgroup_lock);
3024	while (!list_empty(&fs_info->dirty_qgroups)) {
3025		struct btrfs_qgroup *qgroup;
3026		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3027					  struct btrfs_qgroup, dirty);
3028		list_del_init(&qgroup->dirty);
3029		spin_unlock(&fs_info->qgroup_lock);
3030		ret = update_qgroup_info_item(trans, qgroup);
3031		if (ret)
3032			qgroup_mark_inconsistent(fs_info);
3033		ret = update_qgroup_limit_item(trans, qgroup);
3034		if (ret)
3035			qgroup_mark_inconsistent(fs_info);
3036		spin_lock(&fs_info->qgroup_lock);
3037	}
3038	if (btrfs_qgroup_enabled(fs_info))
3039		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3040	else
3041		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3042	spin_unlock(&fs_info->qgroup_lock);
3043
3044	ret = update_qgroup_status_item(trans);
3045	if (ret)
3046		qgroup_mark_inconsistent(fs_info);
3047
3048	return ret;
3049}
3050
3051static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3052			       u64 inode_rootid,
3053			       struct btrfs_qgroup_inherit **inherit)
3054{
3055	int i = 0;
3056	u64 num_qgroups = 0;
3057	struct btrfs_qgroup *inode_qg;
3058	struct btrfs_qgroup_list *qg_list;
3059	struct btrfs_qgroup_inherit *res;
3060	size_t struct_sz;
3061	u64 *qgids;
3062
3063	if (*inherit)
3064		return -EEXIST;
3065
3066	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3067	if (!inode_qg)
3068		return -ENOENT;
3069
3070	num_qgroups = list_count_nodes(&inode_qg->groups);
3071
3072	if (!num_qgroups)
3073		return 0;
3074
3075	struct_sz = struct_size(res, qgroups, num_qgroups);
3076	if (struct_sz == SIZE_MAX)
3077		return -ERANGE;
3078
3079	res = kzalloc(struct_sz, GFP_NOFS);
3080	if (!res)
3081		return -ENOMEM;
3082	res->num_qgroups = num_qgroups;
3083	qgids = res->qgroups;
3084
3085	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3086		qgids[i++] = qg_list->group->qgroupid;
3087
3088	*inherit = res;
3089	return 0;
3090}
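
/*
 * Editor's example (hypothetical ids, illustrative only): if the source
 * subvolume's qgroup 0/257 is a member of 1/100 and 2/200,
 * qgroup_auto_inherit() builds the equivalent of:
 *
 *	res->num_qgroups = 2;
 *	res->qgroups[0] = qgroupid of 1/100;
 *	res->qgroups[1] = qgroupid of 2/200;
 *
 * so in simple quota mode the new subvolume automatically joins the same
 * parent qgroups.
 */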
3091
3092/*
3093 * Copy the accounting information between qgroups. This is necessary
3094 * when a snapshot or a subvolume is created. Throwing an error will
3095 * cause a transaction abort so we take extra care here to only error
3096 * when a readonly fs is a reasonable outcome.
3097 */
3098int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3099			 u64 objectid, u64 inode_rootid,
3100			 struct btrfs_qgroup_inherit *inherit)
3101{
3102	int ret = 0;
3103	int i;
3104	u64 *i_qgroups;
3105	bool committing = false;
3106	struct btrfs_fs_info *fs_info = trans->fs_info;
3107	struct btrfs_root *quota_root;
3108	struct btrfs_qgroup *srcgroup;
3109	struct btrfs_qgroup *dstgroup;
3110	struct btrfs_qgroup *prealloc;
3111	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3112	bool free_inherit = false;
3113	bool need_rescan = false;
3114	u32 level_size = 0;
3115	u64 nums;
3116
3117	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3118	if (!prealloc)
3119		return -ENOMEM;
3120
3121	/*
3122	 * There are only two callers of this function.
3123	 *
3124	 * One in create_subvol() in the ioctl context, which needs to hold
3125	 * the qgroup_ioctl_lock.
3126	 *
3127	 * The other one in create_pending_snapshot() where no other qgroup
3128	 * code can modify the fs as they all need to either start a new trans
3129	 * or hold a trans handler, thus we don't need to hold
3130	 * qgroup_ioctl_lock.
3131	 * This would avoid long and complex lock chain and make lockdep happy.
3132	 */
3133	spin_lock(&fs_info->trans_lock);
3134	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3135		committing = true;
3136	spin_unlock(&fs_info->trans_lock);
3137
3138	if (!committing)
3139		mutex_lock(&fs_info->qgroup_ioctl_lock);
3140	if (!btrfs_qgroup_enabled(fs_info))
3141		goto out;
3142
3143	quota_root = fs_info->quota_root;
3144	if (!quota_root) {
3145		ret = -EINVAL;
3146		goto out;
3147	}
3148
3149	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3150		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3151		if (ret)
3152			goto out;
3153		free_inherit = true;
3154	}
3155
3156	if (inherit) {
3157		i_qgroups = (u64 *)(inherit + 1);
3158		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3159		       2 * inherit->num_excl_copies;
3160		for (i = 0; i < nums; ++i) {
3161			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3162
3163			/*
3164			 * Zero out invalid groups so we can ignore
3165			 * them later.
3166			 */
3167			if (!srcgroup ||
3168			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3169				*i_qgroups = 0ULL;
3170
3171			++i_qgroups;
3172		}
3173	}
3174
3175	/*
3176	 * create a tracking group for the subvol itself
3177	 */
3178	ret = add_qgroup_item(trans, quota_root, objectid);
3179	if (ret)
3180		goto out;
3181
3182	/*
3183	 * add qgroup to all inherited groups
3184	 */
3185	if (inherit) {
3186		i_qgroups = (u64 *)(inherit + 1);
3187		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
3188			if (*i_qgroups == 0)
3189				continue;
3190			ret = add_qgroup_relation_item(trans, objectid,
3191						       *i_qgroups);
3192			if (ret && ret != -EEXIST)
3193				goto out;
3194			ret = add_qgroup_relation_item(trans, *i_qgroups,
3195						       objectid);
3196			if (ret && ret != -EEXIST)
3197				goto out;
3198		}
3199		ret = 0;
3200
3201		qlist_prealloc = kcalloc(inherit->num_qgroups,
3202					 sizeof(struct btrfs_qgroup_list *),
3203					 GFP_NOFS);
3204		if (!qlist_prealloc) {
3205			ret = -ENOMEM;
3206			goto out;
3207		}
3208		for (int i = 0; i < inherit->num_qgroups; i++) {
3209			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3210						    GFP_NOFS);
3211			if (!qlist_prealloc[i]) {
3212				ret = -ENOMEM;
3213				goto out;
3214			}
3215		}
3216	}
3217
3218	spin_lock(&fs_info->qgroup_lock);
3219
3220	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3221	prealloc = NULL;
3222
3223	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3224		dstgroup->lim_flags = inherit->lim.flags;
3225		dstgroup->max_rfer = inherit->lim.max_rfer;
3226		dstgroup->max_excl = inherit->lim.max_excl;
3227		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3228		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3229
3230		qgroup_dirty(fs_info, dstgroup);
3231	}
3232
3233	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3234		srcgroup = find_qgroup_rb(fs_info, srcid);
3235		if (!srcgroup)
3236			goto unlock;
3237
3238		/*
3239		 * We call inherit after we clone the root in order to make sure
3240		 * our counts don't go crazy, so at this point the only
3241		 * difference between the two roots should be the root node.
3242		 */
3243		level_size = fs_info->nodesize;
3244		dstgroup->rfer = srcgroup->rfer;
3245		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3246		dstgroup->excl = level_size;
3247		dstgroup->excl_cmpr = level_size;
3248		srcgroup->excl = level_size;
3249		srcgroup->excl_cmpr = level_size;
3250
3251		/* inherit the limit info */
3252		dstgroup->lim_flags = srcgroup->lim_flags;
3253		dstgroup->max_rfer = srcgroup->max_rfer;
3254		dstgroup->max_excl = srcgroup->max_excl;
3255		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3256		dstgroup->rsv_excl = srcgroup->rsv_excl;
3257
3258		qgroup_dirty(fs_info, dstgroup);
3259		qgroup_dirty(fs_info, srcgroup);
3260	}
3261
3262	if (!inherit)
3263		goto unlock;
3264
3265	i_qgroups = (u64 *)(inherit + 1);
3266	for (i = 0; i < inherit->num_qgroups; ++i) {
3267		if (*i_qgroups) {
3268			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3269					      *i_qgroups);
3270			qlist_prealloc[i] = NULL;
3271			if (ret)
3272				goto unlock;
3273		}
3274		++i_qgroups;
3275
3276		/*
3277		 * If we're doing a snapshot, and adding the snapshot to a new
3278		 * qgroup, the numbers are guaranteed to be incorrect.
3279		 */
3280		if (srcid)
3281			need_rescan = true;
3282	}
3283
3284	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
3285		struct btrfs_qgroup *src;
3286		struct btrfs_qgroup *dst;
3287
3288		if (!i_qgroups[0] || !i_qgroups[1])
3289			continue;
3290
3291		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3292		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3293
3294		if (!src || !dst) {
3295			ret = -EINVAL;
3296			goto unlock;
3297		}
3298
3299		dst->rfer = src->rfer - level_size;
3300		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3301
3302		/* Manually tweaking numbers certainly needs a rescan */
3303		need_rescan = true;
3304	}
3305	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
3306		struct btrfs_qgroup *src;
3307		struct btrfs_qgroup *dst;
3308
3309		if (!i_qgroups[0] || !i_qgroups[1])
3310			continue;
3311
3312		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3313		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3314
3315		if (!src || !dst) {
3316			ret = -EINVAL;
3317			goto unlock;
3318		}
3319
3320		dst->excl = src->excl + level_size;
3321		dst->excl_cmpr = src->excl_cmpr + level_size;
3322		need_rescan = true;
3323	}
3324
3325unlock:
3326	spin_unlock(&fs_info->qgroup_lock);
3327	if (!ret)
3328		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3329out:
3330	if (!committing)
3331		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3332	if (need_rescan)
3333		qgroup_mark_inconsistent(fs_info);
3334	if (qlist_prealloc) {
3335		for (int i = 0; i < inherit->num_qgroups; i++)
3336			kfree(qlist_prealloc[i]);
3337		kfree(qlist_prealloc);
3338	}
3339	if (free_inherit)
3340		kfree(inherit);
3341	kfree(prealloc);
3342	return ret;
3343}
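
/*
 * Editor's note (illustrative summary of the parsing above): the u64 array
 * at (u64 *)(inherit + 1) is consumed in order as
 *
 *	num_qgroups entries:		qgroups the new subvolume joins
 *	2 * num_ref_copies entries:	(src, dst) pairs for rfer copies
 *	2 * num_excl_copies entries:	(src, dst) pairs for excl copies
 *
 * matching nums = num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies.
 */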
3344
3345static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3346{
3347	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3348	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3349		return false;
3350
3351	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3352	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3353		return false;
3354
3355	return true;
3356}
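
/*
 * Editor's worked example (hypothetical numbers): with
 * BTRFS_QGROUP_LIMIT_MAX_RFER set, max_rfer = 1024M, rfer = 900M and 100M
 * already reserved, a further 64M reservation fails the check:
 *
 *	qgroup_rsv_total(qg) + qg->rfer + num_bytes
 *		= 100M + 900M + 64M = 1064M > 1024M
 *
 * so qgroup_reserve() below returns -EDQUOT when enforcing.
 */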
3357
3358static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3359			  enum btrfs_qgroup_rsv_type type)
3360{
3361	struct btrfs_qgroup *qgroup;
3362	struct btrfs_fs_info *fs_info = root->fs_info;
3363	u64 ref_root = root->root_key.objectid;
3364	int ret = 0;
3365	LIST_HEAD(qgroup_list);
3366
3367	if (!is_fstree(ref_root))
3368		return 0;
3369
3370	if (num_bytes == 0)
3371		return 0;
3372
3373	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3374	    capable(CAP_SYS_RESOURCE))
3375		enforce = false;
3376
3377	spin_lock(&fs_info->qgroup_lock);
3378	if (!fs_info->quota_root)
3379		goto out;
3380
3381	qgroup = find_qgroup_rb(fs_info, ref_root);
3382	if (!qgroup)
3383		goto out;
3384
3385	qgroup_iterator_add(&qgroup_list, qgroup);
3386	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3387		struct btrfs_qgroup_list *glist;
3388
3389		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3390			ret = -EDQUOT;
3391			goto out;
3392		}
3393
3394		list_for_each_entry(glist, &qgroup->groups, next_group)
3395			qgroup_iterator_add(&qgroup_list, glist->group);
3396	}
3397
3398	ret = 0;
3399	/*
3400	 * no limits exceeded, now record the reservation into all qgroups
3401	 */
3402	list_for_each_entry(qgroup, &qgroup_list, iterator)
3403		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3404
3405out:
3406	qgroup_iterator_clean(&qgroup_list);
3407	spin_unlock(&fs_info->qgroup_lock);
3408	return ret;
3409}
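
/*
 * Editor's illustration (hypothetical hierarchy): for 0/257 -> 1/100 ->
 * 2/200, reserving against subvolume 257 builds the iterator list
 * {0/257, 1/100, 2/200} by following the ->groups lists. Limits are checked
 * on every member first; only if none would be exceeded is the reservation
 * recorded on all of them, so no partial reservation ever needs to be
 * rolled back.
 */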
3410
3411/*
3412 * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3413 * qgroup).
3414 *
3415 * Will handle all higher level qgroups too.
3416 *
3417 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3418 * This special case is only used for META_PERTRANS type.
3419 */
3420void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3421			       u64 ref_root, u64 num_bytes,
3422			       enum btrfs_qgroup_rsv_type type)
3423{
3424	struct btrfs_qgroup *qgroup;
3425	LIST_HEAD(qgroup_list);
3426
3427	if (!is_fstree(ref_root))
3428		return;
3429
3430	if (num_bytes == 0)
3431		return;
3432
3433	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3434		WARN(1, "%s: Invalid type to free", __func__);
3435		return;
3436	}
3437	spin_lock(&fs_info->qgroup_lock);
3438
3439	if (!fs_info->quota_root)
3440		goto out;
3441
3442	qgroup = find_qgroup_rb(fs_info, ref_root);
3443	if (!qgroup)
3444		goto out;
3445
3446	if (num_bytes == (u64)-1)
3447		/*
3448		 * We're freeing all pertrans rsv, get reserved value from
3449		 * level 0 qgroup as real num_bytes to free.
3450		 */
3451		num_bytes = qgroup->rsv.values[type];
3452
3453	qgroup_iterator_add(&qgroup_list, qgroup);
3454	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3455		struct btrfs_qgroup_list *glist;
3456
3457		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3458		list_for_each_entry(glist, &qgroup->groups, next_group) {
3459			qgroup_iterator_add(&qgroup_list, glist->group);
3460		}
3461	}
3462out:
3463	qgroup_iterator_clean(&qgroup_list);
3464	spin_unlock(&fs_info->qgroup_lock);
3465}
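
/*
 * Editor's usage sketch (illustrative; "root_id" is a placeholder for the
 * subvolume's root objectid): at transaction commit, all remaining
 * per-transaction metadata reservation of a subvolume can be dropped in one
 * call, with the real amount read from the level 0 qgroup:
 *
 *	btrfs_qgroup_free_refroot(fs_info, root_id, (u64)-1,
 *				  BTRFS_QGROUP_RSV_META_PERTRANS);
 */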
3466
3467/*
3468 * Check if the leaf is the last leaf, which means all node pointers
3469 * are at their last position.
3470 */
3471static bool is_last_leaf(struct btrfs_path *path)
3472{
3473	int i;
3474
3475	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3476		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3477			return false;
3478	}
3479	return true;
3480}
3481
3482/*
3483 * returns < 0 on error, 0 when more leaves are to be scanned.
3484 * returns 1 when done.
3485 */
3486static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3487			      struct btrfs_path *path)
3488{
3489	struct btrfs_fs_info *fs_info = trans->fs_info;
3490	struct btrfs_root *extent_root;
3491	struct btrfs_key found;
3492	struct extent_buffer *scratch_leaf = NULL;
3493	u64 num_bytes;
3494	bool done;
3495	int slot;
3496	int ret;
3497
3498	if (!btrfs_qgroup_full_accounting(fs_info))
3499		return 1;
3500
3501	mutex_lock(&fs_info->qgroup_rescan_lock);
3502	extent_root = btrfs_extent_root(fs_info,
3503				fs_info->qgroup_rescan_progress.objectid);
3504	ret = btrfs_search_slot_for_read(extent_root,
3505					 &fs_info->qgroup_rescan_progress,
3506					 path, 1, 0);
3507
3508	btrfs_debug(fs_info,
3509		"current progress key (%llu %u %llu), search_slot ret %d",
3510		fs_info->qgroup_rescan_progress.objectid,
3511		fs_info->qgroup_rescan_progress.type,
3512		fs_info->qgroup_rescan_progress.offset, ret);
3513
3514	if (ret) {
3515		/*
3516		 * The rescan is about to end, we will not be scanning any
3517		 * further blocks. We cannot unset the RESCAN flag here, because
3518		 * we want to commit the transaction if everything went well.
3519		 * To make the live accounting work in this phase, we set our
3520		 * scan progress pointer such that every real extent objectid
3521		 * will be smaller.
3522		 */
3523		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3524		btrfs_release_path(path);
3525		mutex_unlock(&fs_info->qgroup_rescan_lock);
3526		return ret;
3527	}
3528	done = is_last_leaf(path);
3529
3530	btrfs_item_key_to_cpu(path->nodes[0], &found,
3531			      btrfs_header_nritems(path->nodes[0]) - 1);
3532	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3533
3534	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3535	if (!scratch_leaf) {
3536		ret = -ENOMEM;
3537		mutex_unlock(&fs_info->qgroup_rescan_lock);
3538		goto out;
3539	}
3540	slot = path->slots[0];
3541	btrfs_release_path(path);
3542	mutex_unlock(&fs_info->qgroup_rescan_lock);
3543
3544	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3545		struct btrfs_backref_walk_ctx ctx = { 0 };
3546
3547		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3548		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3549		    found.type != BTRFS_METADATA_ITEM_KEY)
3550			continue;
3551		if (found.type == BTRFS_METADATA_ITEM_KEY)
3552			num_bytes = fs_info->nodesize;
3553		else
3554			num_bytes = found.offset;
3555
3556		ctx.bytenr = found.objectid;
3557		ctx.fs_info = fs_info;
3558
3559		ret = btrfs_find_all_roots(&ctx, false);
3560		if (ret < 0)
3561			goto out;
3562		/* For rescan, just pass old_roots as NULL */
3563		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3564						  num_bytes, NULL, ctx.roots);
3565		if (ret < 0)
3566			goto out;
3567	}
3568out:
3569	if (scratch_leaf)
3570		free_extent_buffer(scratch_leaf);
3571
3572	if (done && !ret) {
3573		ret = 1;
3574		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3575	}
3576	return ret;
3577}
3578
3579static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3580{
3581	if (btrfs_fs_closing(fs_info))
3582		return true;
3583	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3584		return true;
3585	if (!btrfs_qgroup_enabled(fs_info))
3586		return true;
3587	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3588		return true;
3589	return false;
3590}
3591
3592static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3593{
3594	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3595						     qgroup_rescan_work);
3596	struct btrfs_path *path;
3597	struct btrfs_trans_handle *trans = NULL;
3598	int err = -ENOMEM;
3599	int ret = 0;
3600	bool stopped = false;
3601	bool did_leaf_rescans = false;
3602
3603	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3604		return;
3605
3606	path = btrfs_alloc_path();
3607	if (!path)
3608		goto out;
3609	/*
3610	 * Rescan should only search for commit root, and any later difference
3611	 * should be recorded by qgroup
3612	 */
3613	path->search_commit_root = 1;
3614	path->skip_locking = 1;
3615
3616	err = 0;
3617	while (!err && !(stopped = rescan_should_stop(fs_info))) {
3618		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3619		if (IS_ERR(trans)) {
3620			err = PTR_ERR(trans);
3621			break;
3622		}
3623
3624		err = qgroup_rescan_leaf(trans, path);
3625		did_leaf_rescans = true;
3626
3627		if (err > 0)
3628			btrfs_commit_transaction(trans);
3629		else
3630			btrfs_end_transaction(trans);
3631	}
3632
3633out:
3634	btrfs_free_path(path);
3635
3636	mutex_lock(&fs_info->qgroup_rescan_lock);
3637	if (err > 0 &&
3638	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3639		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3640	} else if (err < 0 || stopped) {
3641		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3642	}
3643	mutex_unlock(&fs_info->qgroup_rescan_lock);
3644
3645	/*
3646	 * Only update status, since the previous part has already updated the
3647	 * qgroup info, and only if we did any actual work. This also prevents
3648	 * race with a concurrent quota disable, which has already set
3649	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3650	 * btrfs_quota_disable().
3651	 */
3652	if (did_leaf_rescans) {
3653		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3654		if (IS_ERR(trans)) {
3655			err = PTR_ERR(trans);
3656			trans = NULL;
3657			btrfs_err(fs_info,
3658				  "failed to start transaction for status update: %d",
3659				  err);
3660		}
3661	} else {
3662		trans = NULL;
3663	}
3664
3665	mutex_lock(&fs_info->qgroup_rescan_lock);
3666	if (!stopped ||
3667	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3668		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3669	if (trans) {
3670		ret = update_qgroup_status_item(trans);
3671		if (ret < 0) {
3672			err = ret;
3673			btrfs_err(fs_info, "failed to update qgroup status: %d",
3674				  err);
3675		}
3676	}
3677	fs_info->qgroup_rescan_running = false;
3678	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3679	complete_all(&fs_info->qgroup_rescan_completion);
3680	mutex_unlock(&fs_info->qgroup_rescan_lock);
3681
3682	if (!trans)
3683		return;
3684
3685	btrfs_end_transaction(trans);
3686
3687	if (stopped) {
3688		btrfs_info(fs_info, "qgroup scan paused");
3689	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3690		btrfs_info(fs_info, "qgroup scan cancelled");
3691	} else if (err >= 0) {
3692		btrfs_info(fs_info, "qgroup scan completed%s",
3693			err > 0 ? " (inconsistency flag cleared)" : "");
3694	} else {
3695		btrfs_err(fs_info, "qgroup scan failed with %d", err);
3696	}
3697}
3698
3699/*
3700 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3701 * memory required for the rescan context.
3702 */
3703static int
3704qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3705		   int init_flags)
3706{
3707	int ret = 0;
3708
3709	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3710		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3711		return -EINVAL;
3712	}
3713
3714	if (!init_flags) {
3715		/* we're resuming qgroup rescan at mount time */
3716		if (!(fs_info->qgroup_flags &
3717		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3718			btrfs_warn(fs_info,
3719			"qgroup rescan init failed, qgroup rescan is not queued");
3720			ret = -EINVAL;
3721		} else if (!(fs_info->qgroup_flags &
3722			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3723			btrfs_warn(fs_info,
3724			"qgroup rescan init failed, qgroup is not enabled");
3725			ret = -EINVAL;
3726		}
3727
3728		if (ret)
3729			return ret;
3730	}
3731
3732	mutex_lock(&fs_info->qgroup_rescan_lock);
3733
3734	if (init_flags) {
3735		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3736			btrfs_warn(fs_info,
3737				   "qgroup rescan is already in progress");
3738			ret = -EINPROGRESS;
3739		} else if (!(fs_info->qgroup_flags &
3740			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3741			btrfs_warn(fs_info,
3742			"qgroup rescan init failed, qgroup is not enabled");
3743			ret = -EINVAL;
3744		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3745			/* Quota disable is in progress */
3746			ret = -EBUSY;
3747		}
3748
3749		if (ret) {
3750			mutex_unlock(&fs_info->qgroup_rescan_lock);
3751			return ret;
3752		}
3753		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3754	}
3755
3756	memset(&fs_info->qgroup_rescan_progress, 0,
3757		sizeof(fs_info->qgroup_rescan_progress));
3758	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3759				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3760	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3761	init_completion(&fs_info->qgroup_rescan_completion);
3762	mutex_unlock(&fs_info->qgroup_rescan_lock);
3763
3764	btrfs_init_work(&fs_info->qgroup_rescan_work,
3765			btrfs_qgroup_rescan_worker, NULL);
3766	return 0;
3767}
3768
3769static void
3770qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3771{
3772	struct rb_node *n;
3773	struct btrfs_qgroup *qgroup;
3774
3775	spin_lock(&fs_info->qgroup_lock);
3776	/* clear all current qgroup tracking information */
3777	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3778		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3779		qgroup->rfer = 0;
3780		qgroup->rfer_cmpr = 0;
3781		qgroup->excl = 0;
3782		qgroup->excl_cmpr = 0;
3783		qgroup_dirty(fs_info, qgroup);
3784	}
3785	spin_unlock(&fs_info->qgroup_lock);
3786}
3787
3788int
3789btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3790{
3791	int ret = 0;
3792	struct btrfs_trans_handle *trans;
3793
3794	ret = qgroup_rescan_init(fs_info, 0, 1);
3795	if (ret)
3796		return ret;
3797
3798	/*
3799	 * We have set the rescan_progress to 0, which means no more
3800	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3801	 * However, btrfs_qgroup_account_ref may be running right after its
3802	 * call to btrfs_find_all_roots, in which case it would still do the
3803	 * accounting.
3804	 * To solve this, we're committing the transaction, which will
3805	 * ensure we run all delayed refs and only after that, we are
3806	 * going to clear all tracking information for a clean start.
3807	 */
3808
3809	trans = btrfs_attach_transaction_barrier(fs_info->fs_root);
3810	if (IS_ERR(trans) && trans != ERR_PTR(-ENOENT)) {
3811		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3812		return PTR_ERR(trans);
3813	} else if (trans != ERR_PTR(-ENOENT)) {
3814		ret = btrfs_commit_transaction(trans);
3815		if (ret) {
3816			fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3817			return ret;
3818		}
3819	}
3820
3821	qgroup_rescan_zero_tracking(fs_info);
3822
3823	mutex_lock(&fs_info->qgroup_rescan_lock);
3824	fs_info->qgroup_rescan_running = true;
3825	btrfs_queue_work(fs_info->qgroup_rescan_workers,
3826			 &fs_info->qgroup_rescan_work);
3827	mutex_unlock(&fs_info->qgroup_rescan_lock);
3828
3829	return 0;
3830}
3831
3832int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3833				     bool interruptible)
3834{
3835	int running;
3836	int ret = 0;
3837
3838	mutex_lock(&fs_info->qgroup_rescan_lock);
3839	running = fs_info->qgroup_rescan_running;
3840	mutex_unlock(&fs_info->qgroup_rescan_lock);
3841
3842	if (!running)
3843		return 0;
3844
3845	if (interruptible)
3846		ret = wait_for_completion_interruptible(
3847					&fs_info->qgroup_rescan_completion);
3848	else
3849		wait_for_completion(&fs_info->qgroup_rescan_completion);
3850
3851	return ret;
3852}
3853
3854/*
3855 * this is only called from open_ctree where we're still single threaded, thus
3856 * locking is omitted here.
3857 */
3858void
3859btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3860{
3861	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3862		mutex_lock(&fs_info->qgroup_rescan_lock);
3863		fs_info->qgroup_rescan_running = true;
3864		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3865				 &fs_info->qgroup_rescan_work);
3866		mutex_unlock(&fs_info->qgroup_rescan_lock);
3867	}
3868}
3869
3870#define rbtree_iterate_from_safe(node, next, start)				\
3871       for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
3872
3873static int qgroup_unreserve_range(struct btrfs_inode *inode,
3874				  struct extent_changeset *reserved, u64 start,
3875				  u64 len)
3876{
3877	struct rb_node *node;
3878	struct rb_node *next;
3879	struct ulist_node *entry;
3880	int ret = 0;
3881
3882	node = reserved->range_changed.root.rb_node;
3883	if (!node)
3884		return 0;
3885	while (node) {
3886		entry = rb_entry(node, struct ulist_node, rb_node);
3887		if (entry->val < start)
3888			node = node->rb_right;
3889		else
3890			node = node->rb_left;
3891	}
3892
3893	if (entry->val > start && rb_prev(&entry->rb_node))
3894		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
3895				 rb_node);
3896
3897	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
3898		u64 entry_start;
3899		u64 entry_end;
3900		u64 entry_len;
3901		int clear_ret;
3902
3903		entry = rb_entry(node, struct ulist_node, rb_node);
3904		entry_start = entry->val;
3905		entry_end = entry->aux;
3906		entry_len = entry_end - entry_start + 1;
3907
3908		if (entry_start >= start + len)
3909			break;
3910		if (entry_start + entry_len <= start)
3911			continue;
3912		/*
3913		 * Now the entry is in [start, start + len), revert the
3914		 * EXTENT_QGROUP_RESERVED bit.
3915		 */
3916		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3917					      entry_end, EXTENT_QGROUP_RESERVED);
3918		if (!ret && clear_ret < 0)
3919			ret = clear_ret;
3920
3921		ulist_del(&reserved->range_changed, entry->val, entry->aux);
3922		if (likely(reserved->bytes_changed >= entry_len)) {
3923			reserved->bytes_changed -= entry_len;
3924		} else {
3925			WARN_ON(1);
3926			reserved->bytes_changed = 0;
3927		}
3928	}
3929
3930	return ret;
3931}
3932
3933/*
3934 * Try to free some space for qgroup.
3935 *
3936 * For qgroup, there are only 3 ways to free qgroup space:
3937 * - Flush nodatacow write
3938 *   Any nodatacow write will free its reserved data space at run_delalloc_range().
3939 *   In theory, we should only flush nodatacow inodes, but it's not yet
3940 *   possible, so we need to flush the whole root.
3941 *
3942 * - Wait for ordered extents
3943 *   When ordered extents are finished, their reserved metadata is finally
3944 *   converted to per_trans status, which can be freed by a later transaction
3945 *   commit.
3946 *
3947 * - Commit transaction
3948 *   This would free the meta_per_trans space.
3949 *   In theory this shouldn't provide much space, but any extra qgroup space
3950 *   is better than nothing.
3951 */
3952static int try_flush_qgroup(struct btrfs_root *root)
3953{
3954	struct btrfs_trans_handle *trans;
3955	int ret;
3956
3957	/* Can't hold an open transaction or we run the risk of deadlocking. */
3958	ASSERT(current->journal_info == NULL);
3959	if (WARN_ON(current->journal_info))
3960		return 0;
3961
3962	/*
3963	 * We don't want to run flush again and again, so if there is a running
3964	 * one, we won't try to start a new flush, but exit directly.
3965	 */
3966	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
3967		wait_event(root->qgroup_flush_wait,
3968			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3969		return 0;
3970	}
3971
3972	ret = btrfs_start_delalloc_snapshot(root, true);
3973	if (ret < 0)
3974		goto out;
3975	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3976
3977	trans = btrfs_attach_transaction_barrier(root);
3978	if (IS_ERR(trans)) {
3979		ret = PTR_ERR(trans);
3980		if (ret == -ENOENT)
3981			ret = 0;
3982		goto out;
3983	}
3984
3985	ret = btrfs_commit_transaction(trans);
3986out:
3987	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3988	wake_up(&root->qgroup_flush_wait);
3989	return ret;
3990}
3991
3992static int qgroup_reserve_data(struct btrfs_inode *inode,
3993			struct extent_changeset **reserved_ret, u64 start,
3994			u64 len)
3995{
3996	struct btrfs_root *root = inode->root;
3997	struct extent_changeset *reserved;
3998	bool new_reserved = false;
3999	u64 orig_reserved;
4000	u64 to_reserve;
4001	int ret;
4002
4003	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4004	    !is_fstree(root->root_key.objectid) || len == 0)
4005		return 0;
4006
4007	/* @reserved parameter is mandatory for qgroup */
4008	if (WARN_ON(!reserved_ret))
4009		return -EINVAL;
4010	if (!*reserved_ret) {
4011		new_reserved = true;
4012		*reserved_ret = extent_changeset_alloc();
4013		if (!*reserved_ret)
4014			return -ENOMEM;
4015	}
4016	reserved = *reserved_ret;
4017	/* Record already reserved space */
4018	orig_reserved = reserved->bytes_changed;
4019	ret = set_record_extent_bits(&inode->io_tree, start,
4020			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
4021
4022	/* Newly reserved space */
4023	to_reserve = reserved->bytes_changed - orig_reserved;
4024	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4025					to_reserve, QGROUP_RESERVE);
4026	if (ret < 0)
4027		goto out;
4028	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4029	if (ret < 0)
4030		goto cleanup;
4031
4032	return ret;
4033
4034cleanup:
4035	qgroup_unreserve_range(inode, reserved, start, len);
4036out:
4037	if (new_reserved) {
4038		extent_changeset_free(reserved);
4039		*reserved_ret = NULL;
4040	}
4041	return ret;
4042}
4043
4044/*
4045 * Reserve qgroup space for range [start, start + len).
4046 *
4047 * This function will either reserve space from related qgroups or do nothing
4048 * if the range is already reserved.
4049 *
4050 * Return 0 for successful reservation
4051 * Return <0 for error (including -EDQUOT)
4052 *
4053 * NOTE: This function may sleep for memory allocation, dirty page flushing and
4054 *	 transaction commit. So the caller should not hold any dirty page locked.
4055 */
4056int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4057			struct extent_changeset **reserved_ret, u64 start,
4058			u64 len)
4059{
4060	int ret;
4061
4062	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4063	if (ret <= 0 && ret != -EDQUOT)
4064		return ret;
4065
4066	ret = try_flush_qgroup(inode->root);
4067	if (ret < 0)
4068		return ret;
4069	return qgroup_reserve_data(inode, reserved_ret, start, len);
4070}
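
/*
 * Editor's caller-side sketch (simplified and illustrative; "inode", "pos"
 * and "count" are placeholders): write paths pass a NULL-initialized
 * changeset pointer and free it once the reservation has been released or
 * consumed:
 *
 *	struct extent_changeset *data_reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, pos, count);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	extent_changeset_free(data_reserved);
 */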
4071
4072/* Free ranges specified by @reserved, normally in error path */
4073static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4074				     struct extent_changeset *reserved,
4075				     u64 start, u64 len, u64 *freed_ret)
4076{
4077	struct btrfs_root *root = inode->root;
4078	struct ulist_node *unode;
4079	struct ulist_iterator uiter;
4080	struct extent_changeset changeset;
4081	u64 freed = 0;
4082	int ret;
4083
4084	extent_changeset_init(&changeset);
4085	len = round_up(start + len, root->fs_info->sectorsize);
4086	start = round_down(start, root->fs_info->sectorsize);
4087
4088	ULIST_ITER_INIT(&uiter);
4089	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4090		u64 range_start = unode->val;
4091		/* unode->aux is the inclusive end */
4092		u64 range_len = unode->aux - range_start + 1;
4093		u64 free_start;
4094		u64 free_len;
4095
4096		extent_changeset_release(&changeset);
4097
4098		/* Only free the part of the range within [start, start + len) */
4099		if (range_start >= start + len ||
4100		    range_start + range_len <= start)
4101			continue;
4102		free_start = max(range_start, start);
4103		free_len = min(start + len, range_start + range_len) -
4104			   free_start;
4105		/*
4106		 * TODO: Also modify reserved->ranges_reserved to reflect
4107		 * the modification.
4108		 *
4109		 * However, as long as we free qgroup reserved space according
4110		 * to EXTENT_QGROUP_RESERVED, we won't double free.
4111		 * So there is no need to rush.
4112		 */
4113		ret = clear_record_extent_bits(&inode->io_tree, free_start,
4114				free_start + free_len - 1,
4115				EXTENT_QGROUP_RESERVED, &changeset);
4116		if (ret < 0)
4117			goto out;
4118		freed += changeset.bytes_changed;
4119	}
4120	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
4121				  BTRFS_QGROUP_RSV_DATA);
4122	if (freed_ret)
4123		*freed_ret = freed;
4124	ret = 0;
4125out:
4126	extent_changeset_release(&changeset);
4127	return ret;
4128}
4129
4130static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4131			struct extent_changeset *reserved, u64 start, u64 len,
4132			u64 *released, int free)
4133{
4134	struct extent_changeset changeset;
4135	int trace_op = QGROUP_RELEASE;
4136	int ret;
4137
4138	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4139		extent_changeset_init(&changeset);
4140		return clear_record_extent_bits(&inode->io_tree, start,
4141						start + len - 1,
4142						EXTENT_QGROUP_RESERVED, &changeset);
4143	}
4144
4145	/* In release case, we shouldn't have @reserved */
4146	WARN_ON(!free && reserved);
4147	if (free && reserved)
4148		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4149	extent_changeset_init(&changeset);
4150	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4151				       EXTENT_QGROUP_RESERVED, &changeset);
4152	if (ret < 0)
4153		goto out;
4154
4155	if (free)
4156		trace_op = QGROUP_FREE;
4157	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4158					changeset.bytes_changed, trace_op);
4159	if (free)
4160		btrfs_qgroup_free_refroot(inode->root->fs_info,
4161				inode->root->root_key.objectid,
4162				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4163	if (released)
4164		*released = changeset.bytes_changed;
4165out:
4166	extent_changeset_release(&changeset);
4167	return ret;
4168}
4169
4170/*
4171 * Free a reserved space range from io_tree and related qgroups
4172 *
4173 * Should be called when a range of pages gets invalidated before reaching
4174 * disk, or in the error cleanup case.
4175 * If @reserved is given, only the reserved range in [@start, @start + @len)
4176 * will be freed.
4177 *
4178 * For data written to disk, use btrfs_qgroup_release_data().
4179 *
4180 * NOTE: This function may sleep for memory allocation.
4181 */
4182int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4183			   struct extent_changeset *reserved,
4184			   u64 start, u64 len, u64 *freed)
4185{
4186	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4187}
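/*
 * Illustrative sketch (not part of this file): an error path of a buffered
 * write could undo its data reservation with something like:
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, len);
 *	...
 *	// on failure before the data reaches disk:
 *	btrfs_qgroup_free_data(inode, data_reserved, start, len, NULL);
 *
 * Passing NULL for @freed is fine when the caller does not need the number
 * of bytes actually freed.
 */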
4188
4189/*
4190 * Release a reserved space range from io_tree only.
4191 *
4192 * Should be called when a range of pages gets written to disk and the
4193 * corresponding FILE_EXTENT item is inserted into the corresponding root.
4194 *
4195 * Since the new qgroup accounting framework only updates qgroup numbers at
4196 * commit_transaction() time, its reserved space shouldn't be freed from
4197 * related qgroups.
4198 *
4199 * But we should release the range from io_tree, to allow further writes to
4200 * be COWed.
4201 *
4202 * NOTE: This function may sleep for memory allocation.
4203 */
4204int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4205{
4206	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4207}
4208
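/*
 * The two helpers below maintain root->qgroup_meta_rsv_prealloc and
 * root->qgroup_meta_rsv_pertrans, the per-root record of how much meta
 * space has been reserved while quotas were enabled.  Frees and conversions
 * are clamped to these numbers, so a free for space that was never reserved
 * (e.g. reserved before quotas were enabled) cannot underflow the qgroup
 * counters.
 */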
4209static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4210			      enum btrfs_qgroup_rsv_type type)
4211{
4212	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4213	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4214		return;
4215	if (num_bytes == 0)
4216		return;
4217
4218	spin_lock(&root->qgroup_meta_rsv_lock);
4219	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4220		root->qgroup_meta_rsv_prealloc += num_bytes;
4221	else
4222		root->qgroup_meta_rsv_pertrans += num_bytes;
4223	spin_unlock(&root->qgroup_meta_rsv_lock);
4224}
4225
4226static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4227			     enum btrfs_qgroup_rsv_type type)
4228{
4229	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4230	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4231		return 0;
4232	if (num_bytes == 0)
4233		return 0;
4234
4235	spin_lock(&root->qgroup_meta_rsv_lock);
4236	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4237		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4238				  num_bytes);
4239		root->qgroup_meta_rsv_prealloc -= num_bytes;
4240	} else {
4241		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4242				  num_bytes);
4243		root->qgroup_meta_rsv_pertrans -= num_bytes;
4244	}
4245	spin_unlock(&root->qgroup_meta_rsv_lock);
4246	return num_bytes;
4247}
4248
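/*
 * Reserve @num_bytes (must be nodesize aligned) of metadata space of @type
 * for @root and record it in the per-root counters above.  Returns 0 on
 * success or a negative errno, e.g. -EDQUOT when @enforce is set and a
 * qgroup limit would be exceeded.
 */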
4249int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4250			      enum btrfs_qgroup_rsv_type type, bool enforce)
4251{
4252	struct btrfs_fs_info *fs_info = root->fs_info;
4253	int ret;
4254
4255	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4256	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
4257		return 0;
4258
4259	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4260	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4261	ret = qgroup_reserve(root, num_bytes, enforce, type);
4262	if (ret < 0)
4263		return ret;
4264	/*
4265	 * Record what we have reserved into the root.
4266	 *
4267	 * This avoids underflow after a quota disabled->enabled cycle:
4268	 * in that case we may try to free space we haven't reserved
4269	 * (since quota was disabled), so record what we reserved into the
4270	 * root and ensure the later release won't underflow this number.
4271	 */
4272	add_root_meta_rsv(root, num_bytes, type);
4273	return ret;
4274}
4275
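/*
 * Same as btrfs_qgroup_reserve_meta(), but on -EDQUOT try to flush qgroup
 * space (writing back dirty data and committing the transaction, so that
 * reserved space is returned to the qgroups) and retry once.  @noflush
 * skips the flush-and-retry and returns the first result directly.
 */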
4276int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4277				enum btrfs_qgroup_rsv_type type, bool enforce,
4278				bool noflush)
4279{
4280	int ret;
4281
4282	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4283	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4284		return ret;
4285
4286	ret = try_flush_qgroup(root);
4287	if (ret < 0)
4288		return ret;
4289	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4290}
4291
4292/*
4293 * Per-transaction meta reservations should all be freed at transaction
4294 * commit time.
4295 */
4296void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4297{
4298	struct btrfs_fs_info *fs_info = root->fs_info;
4299
4300	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4301	    !is_fstree(root->root_key.objectid))
4302		return;
4303
4304	/* TODO: Update trace point to handle such free */
4305	trace_qgroup_meta_free_all_pertrans(root);
4306	/* Special value -1 means to free all reserved space */
4307	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
4308				  BTRFS_QGROUP_RSV_META_PERTRANS);
4309}
4310
4311void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4312			      enum btrfs_qgroup_rsv_type type)
4313{
4314	struct btrfs_fs_info *fs_info = root->fs_info;
4315
4316	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4317	    !is_fstree(root->root_key.objectid))
4318		return;
4319
4320	/*
4321	 * Reservations for META_PREALLOC can happen before quota is enabled,
4322	 * which can lead to underflow.
4323	 * Here, ensure we will only free what we have really reserved.
4324	 */
4325	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4326	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4327	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4328	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
4329				  num_bytes, type);
4330}
4331
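/*
 * Walk @ref_root and all of its parent qgroups, moving @num_bytes of
 * reserved space from META_PREALLOC to META_PERTRANS.  On a read-only
 * filesystem the PERTRANS side is skipped, as there will be no transaction
 * commit to free it.
 */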
4332static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4333				int num_bytes)
4334{
4335	struct btrfs_qgroup *qgroup;
4336	LIST_HEAD(qgroup_list);
4337
4338	if (num_bytes == 0)
4339		return;
4340	if (!fs_info->quota_root)
4341		return;
4342
4343	spin_lock(&fs_info->qgroup_lock);
4344	qgroup = find_qgroup_rb(fs_info, ref_root);
4345	if (!qgroup)
4346		goto out;
4347
4348	qgroup_iterator_add(&qgroup_list, qgroup);
4349	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4350		struct btrfs_qgroup_list *glist;
4351
4352		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4353				BTRFS_QGROUP_RSV_META_PREALLOC);
4354		if (!sb_rdonly(fs_info->sb))
4355			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4356				       BTRFS_QGROUP_RSV_META_PERTRANS);
4357
4358		list_for_each_entry(glist, &qgroup->groups, next_group)
4359			qgroup_iterator_add(&qgroup_list, glist->group);
4360	}
4361out:
4362	qgroup_iterator_clean(&qgroup_list);
4363	spin_unlock(&fs_info->qgroup_lock);
4364}
4365
4366/*
4367 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4368 *
4369 * This is called when a preallocated meta reservation needs to be used,
4370 * normally after a btrfs_join_transaction() call.
4371 */
4372void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4373{
4374	struct btrfs_fs_info *fs_info = root->fs_info;
4375
4376	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4377	    !is_fstree(root->root_key.objectid))
4378		return;
4379	/* Same as btrfs_qgroup_free_meta_prealloc() */
4380	num_bytes = sub_root_meta_rsv(root, num_bytes,
4381				      BTRFS_QGROUP_RSV_META_PREALLOC);
4382	trace_qgroup_meta_convert(root, num_bytes);
4383	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
4384}
4385
4386/*
4387 * Check for leaked qgroup reserved space, normally at inode destruction
4388 * time.
4389 */
4390void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4391{
4392	struct extent_changeset changeset;
4393	struct ulist_node *unode;
4394	struct ulist_iterator iter;
4395	int ret;
4396
4397	extent_changeset_init(&changeset);
4398	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4399			EXTENT_QGROUP_RESERVED, &changeset);
4400
4401	WARN_ON(ret < 0);
4402	if (WARN_ON(changeset.bytes_changed)) {
4403		ULIST_ITER_INIT(&iter);
4404		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4405			btrfs_warn(inode->root->fs_info,
4406		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4407				btrfs_ino(inode), unode->val, unode->aux);
4408		}
4409		btrfs_qgroup_free_refroot(inode->root->fs_info,
4410				inode->root->root_key.objectid,
4411				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4412
4413	}
4414	extent_changeset_release(&changeset);
4415}
4416
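/*
 * Initialize the per-root swapped block tracking: one rb-tree per tree
 * level.  These trees record subtree roots swapped by relocation, so the
 * expensive qgroup subtree tracing can be delayed until the block is
 * COWed (see btrfs_qgroup_trace_subtree_after_cow()).
 */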
4417void btrfs_qgroup_init_swapped_blocks(
4418	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4419{
4420	int i;
4421
4422	spin_lock_init(&swapped_blocks->lock);
4423	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4424		swapped_blocks->blocks[i] = RB_ROOT;
4425	swapped_blocks->swapped = false;
4426}
4427
4428/*
4429 * Delete all swapped block records of @root.
4430 * Every record here means we skipped a full subtree scan for qgroup.
4431 *
4432 * Gets called when committing one transaction.
4433 */
4434void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4435{
4436	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4437	int i;
4438
4439	swapped_blocks = &root->swapped_blocks;
4440
4441	spin_lock(&swapped_blocks->lock);
4442	if (!swapped_blocks->swapped)
4443		goto out;
4444	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4445		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4446		struct btrfs_qgroup_swapped_block *entry;
4447		struct btrfs_qgroup_swapped_block *next;
4448
4449		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4450						     node)
4451			kfree(entry);
4452		swapped_blocks->blocks[i] = RB_ROOT;
4453	}
4454	swapped_blocks->swapped = false;
4455out:
4456	spin_unlock(&swapped_blocks->lock);
4457}
4458
4459/*
4460 * Add a subtree root record into @subvol_root.
4461 *
4462 * @subvol_root:	tree root of the subvolume tree getting swapped
4463 * @bg:			block group under balance
4464 * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4465 * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4466 *			BOTH POINTERS ARE BEFORE TREE SWAP
4467 * @last_snapshot:	last snapshot generation of the subvolume tree
4468 */
4469int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4470		struct btrfs_root *subvol_root,
4471		struct btrfs_block_group *bg,
4472		struct extent_buffer *subvol_parent, int subvol_slot,
4473		struct extent_buffer *reloc_parent, int reloc_slot,
4474		u64 last_snapshot)
4475{
4476	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4477	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4478	struct btrfs_qgroup_swapped_block *block;
4479	struct rb_node **cur;
4480	struct rb_node *parent = NULL;
4481	int level = btrfs_header_level(subvol_parent) - 1;
4482	int ret = 0;
4483
4484	if (!btrfs_qgroup_full_accounting(fs_info))
4485		return 0;
4486
4487	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4488	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4489		btrfs_err_rl(fs_info,
4490		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4491			__func__,
4492			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4493			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4494		return -EUCLEAN;
4495	}
4496
4497	block = kmalloc(sizeof(*block), GFP_NOFS);
4498	if (!block) {
4499		ret = -ENOMEM;
4500		goto out;
4501	}
4502
4503	/*
4504	 * @reloc_parent/slot is still before swap, while @block is going to
4505	 * record the bytenr after swap, so we do the swap here.
4506	 */
4507	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4508	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4509							     reloc_slot);
4510	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4511	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4512							    subvol_slot);
4513	block->last_snapshot = last_snapshot;
4514	block->level = level;
4515
4516	/*
4517	 * If bg == NULL, we're called from btrfs_recover_relocation();
4518	 * no one else can modify tree blocks, thus the qgroup numbers will
4519	 * not change no matter the value of trace_leaf.
4520	 */
4521	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4522		block->trace_leaf = true;
4523	else
4524		block->trace_leaf = false;
4525	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4526
4527	/* Insert @block into @blocks */
4528	spin_lock(&blocks->lock);
4529	cur = &blocks->blocks[level].rb_node;
4530	while (*cur) {
4531		struct btrfs_qgroup_swapped_block *entry;
4532
4533		parent = *cur;
4534		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4535				 node);
4536
4537		if (entry->subvol_bytenr < block->subvol_bytenr) {
4538			cur = &(*cur)->rb_left;
4539		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4540			cur = &(*cur)->rb_right;
4541		} else {
4542			if (entry->subvol_generation !=
4543					block->subvol_generation ||
4544			    entry->reloc_bytenr != block->reloc_bytenr ||
4545			    entry->reloc_generation !=
4546					block->reloc_generation) {
4547				/*
4548				 * Duplicate but mismatched entry found.
4549				 * This shouldn't happen.
4550				 *
4551				 * Marking qgroup inconsistent should be enough
4552				 * for end users.
4553				 */
4554				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4555				ret = -EEXIST;
4556			}
4557			kfree(block);
4558			goto out_unlock;
4559		}
4560	}
4561	rb_link_node(&block->node, parent, cur);
4562	rb_insert_color(&block->node, &blocks->blocks[level]);
4563	blocks->swapped = true;
4564out_unlock:
4565	spin_unlock(&blocks->lock);
4566out:
4567	if (ret < 0)
4568		qgroup_mark_inconsistent(fs_info);
4569	return ret;
4570}
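/*
 * Note that the per-level rb-trees above are keyed by subvol_bytenr, the
 * bytenr the subvolume tree block will have *after* the swap, which is
 * exactly what btrfs_qgroup_trace_subtree_after_cow() looks up once that
 * block gets COWed.
 */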
4571
4572/*
4573 * Check if the tree block is a subtree root, and if so do the needed
4574 * delayed subtree trace for qgroup.
4575 *
4576 * This is called during btrfs_cow_block().
4577 */
4578int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4579					 struct btrfs_root *root,
4580					 struct extent_buffer *subvol_eb)
4581{
4582	struct btrfs_fs_info *fs_info = root->fs_info;
4583	struct btrfs_tree_parent_check check = { 0 };
4584	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4585	struct btrfs_qgroup_swapped_block *block;
4586	struct extent_buffer *reloc_eb = NULL;
4587	struct rb_node *node;
4588	bool found = false;
4589	bool swapped = false;
4590	int level = btrfs_header_level(subvol_eb);
4591	int ret = 0;
4592	int i;
4593
4594	if (!btrfs_qgroup_full_accounting(fs_info))
4595		return 0;
4596	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
4597		return 0;
4598
4599	spin_lock(&blocks->lock);
4600	if (!blocks->swapped) {
4601		spin_unlock(&blocks->lock);
4602		return 0;
4603	}
4604	node = blocks->blocks[level].rb_node;
4605
4606	while (node) {
4607		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4608		if (block->subvol_bytenr < subvol_eb->start) {
4609			node = node->rb_left;
4610		} else if (block->subvol_bytenr > subvol_eb->start) {
4611			node = node->rb_right;
4612		} else {
4613			found = true;
4614			break;
4615		}
4616	}
4617	if (!found) {
4618		spin_unlock(&blocks->lock);
4619		goto out;
4620	}
4621	/* Found one, remove it from @blocks first and update blocks->swapped */
4622	rb_erase(&block->node, &blocks->blocks[level]);
4623	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4624		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4625			swapped = true;
4626			break;
4627		}
4628	}
4629	blocks->swapped = swapped;
4630	spin_unlock(&blocks->lock);
4631
4632	check.level = block->level;
4633	check.transid = block->reloc_generation;
4634	check.has_first_key = true;
4635	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4636
4637	/* Read out reloc subtree root */
4638	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4639	if (IS_ERR(reloc_eb)) {
4640		ret = PTR_ERR(reloc_eb);
4641		reloc_eb = NULL;
4642		goto free_out;
4643	}
4644	if (!extent_buffer_uptodate(reloc_eb)) {
4645		ret = -EIO;
4646		goto free_out;
4647	}
4648
4649	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4650			block->last_snapshot, block->trace_leaf);
4651free_out:
4652	kfree(block);
4653	free_extent_buffer(reloc_eb);
4654out:
4655	if (ret < 0) {
4656		btrfs_err_rl(fs_info,
4657			     "failed to account subtree at bytenr %llu: %d",
4658			     subvol_eb->start, ret);
4659		qgroup_mark_inconsistent(fs_info);
4660	}
4661	return ret;
4662}
4663
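/*
 * Free all qgroup extent records attached to @trans without accounting
 * them, including their old_roots ulists.  This is used when the records
 * will never be processed, e.g. on transaction abort.
 */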
4664void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4665{
4666	struct btrfs_qgroup_extent_record *entry;
4667	struct btrfs_qgroup_extent_record *next;
4668	struct rb_root *root;
4669
4670	root = &trans->delayed_refs.dirty_extent_root;
4671	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4672		ulist_free(entry->old_roots);
4673		kfree(entry);
4674	}
4675	*root = RB_ROOT;
4676}
4677
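/*
 * Return a data reservation to the qgroups under simple quotas.  This is
 * a no-op unless the filesystem is in SIMPLE qgroup mode and @root is a
 * fs tree.
 */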
4678void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
4679{
4680	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4681		return;
4682
4683	if (!is_fstree(root))
4684		return;
4685
4686	btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
4687}
4688
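/*
 * Apply a simple quota delta: adjust both excl and rfer of the owning
 * qgroup and of every ancestor qgroup by +/- @num_bytes (simple quotas do
 * not track shared extents, so the two counters move together).  Extents
 * created before quotas were enabled are skipped.
 */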
4689int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4690			      struct btrfs_squota_delta *delta)
4691{
4692	int ret;
4693	struct btrfs_qgroup *qgroup;
4694	struct btrfs_qgroup *qg;
4695	LIST_HEAD(qgroup_list);
4696	u64 root = delta->root;
4697	u64 num_bytes = delta->num_bytes;
4698	const int sign = (delta->is_inc ? 1 : -1);
4699
4700	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4701		return 0;
4702
4703	if (!is_fstree(root))
4704		return 0;
4705
4706	/* If the extent predates enabling quotas, don't count it. */
4707	if (delta->generation < fs_info->qgroup_enable_gen)
4708		return 0;
4709
4710	spin_lock(&fs_info->qgroup_lock);
4711	qgroup = find_qgroup_rb(fs_info, root);
4712	if (!qgroup) {
4713		ret = -ENOENT;
4714		goto out;
4715	}
4716
4717	ret = 0;
4718	qgroup_iterator_add(&qgroup_list, qgroup);
4719	list_for_each_entry(qg, &qgroup_list, iterator) {
4720		struct btrfs_qgroup_list *glist;
4721
4722		qg->excl += num_bytes * sign;
4723		qg->rfer += num_bytes * sign;
4724		qgroup_dirty(fs_info, qg);
4725
4726		list_for_each_entry(glist, &qg->groups, next_group)
4727			qgroup_iterator_add(&qgroup_list, glist->group);
4728	}
4729	qgroup_iterator_clean(&qgroup_list);
4730
4731out:
4732	spin_unlock(&fs_info->qgroup_lock);
4733	return ret;
4734}