   1/*
   2 * Copyright (C) 2011 STRATO.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/sched.h>
  20#include <linux/pagemap.h>
  21#include <linux/writeback.h>
  22#include <linux/blkdev.h>
  23#include <linux/rbtree.h>
  24#include <linux/slab.h>
  25#include <linux/workqueue.h>
  26#include <linux/btrfs.h>
  27
  28#include "ctree.h"
  29#include "transaction.h"
  30#include "disk-io.h"
  31#include "locking.h"
  32#include "ulist.h"
  33#include "backref.h"
  34#include "extent_io.h"
  35#include "qgroup.h"
  36
  37
  38/* TODO XXX FIXME
  39 *  - subvol delete -> delete when ref goes to 0? delete limits also?
  40 *  - reorganize keys
  41 *  - compressed
  42 *  - sync
  43 *  - copy also limits on subvol creation
  44 *  - limit
   45 *  - caches for ulists
  46 *  - performance benchmarks
  47 *  - check all ioctl parameters
  48 */
  49
  50/*
  51 * one struct for each qgroup, organized in fs_info->qgroup_tree.
  52 */
  53struct btrfs_qgroup {
  54	u64 qgroupid;
  55
  56	/*
  57	 * state
  58	 */
  59	u64 rfer;	/* referenced */
  60	u64 rfer_cmpr;	/* referenced compressed */
  61	u64 excl;	/* exclusive */
  62	u64 excl_cmpr;	/* exclusive compressed */
  63
  64	/*
  65	 * limits
  66	 */
  67	u64 lim_flags;	/* which limits are set */
  68	u64 max_rfer;
  69	u64 max_excl;
  70	u64 rsv_rfer;
  71	u64 rsv_excl;
  72
  73	/*
  74	 * reservation tracking
  75	 */
  76	u64 reserved;
  77
  78	/*
  79	 * lists
  80	 */
  81	struct list_head groups;  /* groups this group is member of */
  82	struct list_head members; /* groups that are members of this group */
  83	struct list_head dirty;   /* dirty groups */
  84	struct rb_node node;	  /* tree of qgroups */
  85
  86	/*
  87	 * temp variables for accounting operations
   88	 * Refer to qgroup_shared_accounting() for details.
  89	 */
  90	u64 old_refcnt;
  91	u64 new_refcnt;
  92};
  93
  94static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
  95					   int mod)
  96{
  97	if (qg->old_refcnt < seq)
  98		qg->old_refcnt = seq;
  99	qg->old_refcnt += mod;
 100}
 101
 102static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
 103					   int mod)
 104{
 105	if (qg->new_refcnt < seq)
 106		qg->new_refcnt = seq;
 107	qg->new_refcnt += mod;
 108}
 109
 110static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
 111{
 112	if (qg->old_refcnt < seq)
 113		return 0;
 114	return qg->old_refcnt - seq;
 115}
 116
 117static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
 118{
 119	if (qg->new_refcnt < seq)
 120		return 0;
 121	return qg->new_refcnt - seq;
 122}
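
/*
 * Worked example for the seq-based refcnt helpers above, with made-up
 * numbers (illustrative values only):
 *
 *   An accounting round picks seq = 100.  A qgroup still carrying
 *   old_refcnt = 42 from an earlier round is bumped by
 *   btrfs_qgroup_update_old_refcnt(qg, 100, 1): the value is first
 *   snapped up to 100, then incremented to 101; a second bump gives 102.
 *   btrfs_qgroup_get_old_refcnt(qg, 100) then reports 102 - 100 = 2,
 *   while a qgroup never touched in this round still holds a value below
 *   seq and reports 0.  This is what lets every round start from zero
 *   without resetting the counters of all qgroups.
 */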
 123
 124/*
 125 * glue structure to represent the relations between qgroups.
 126 */
 127struct btrfs_qgroup_list {
 128	struct list_head next_group;
 129	struct list_head next_member;
 130	struct btrfs_qgroup *group;
 131	struct btrfs_qgroup *member;
 132};
 133
 134#define ptr_to_u64(x) ((u64)(uintptr_t)x)
 135#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
 136
 137static int
 138qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 139		   int init_flags);
 140static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
 141
 142/* must be called with qgroup_ioctl_lock held */
 143static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
 144					   u64 qgroupid)
 145{
 146	struct rb_node *n = fs_info->qgroup_tree.rb_node;
 147	struct btrfs_qgroup *qgroup;
 148
 149	while (n) {
 150		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 151		if (qgroup->qgroupid < qgroupid)
 152			n = n->rb_left;
 153		else if (qgroup->qgroupid > qgroupid)
 154			n = n->rb_right;
 155		else
 156			return qgroup;
 157	}
 158	return NULL;
 159}
 160
 161/* must be called with qgroup_lock held */
 162static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
 163					  u64 qgroupid)
 164{
 165	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
 166	struct rb_node *parent = NULL;
 167	struct btrfs_qgroup *qgroup;
 168
 169	while (*p) {
 170		parent = *p;
 171		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
 172
 173		if (qgroup->qgroupid < qgroupid)
 174			p = &(*p)->rb_left;
 175		else if (qgroup->qgroupid > qgroupid)
 176			p = &(*p)->rb_right;
 177		else
 178			return qgroup;
 179	}
 180
 181	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
 182	if (!qgroup)
 183		return ERR_PTR(-ENOMEM);
 184
 185	qgroup->qgroupid = qgroupid;
 186	INIT_LIST_HEAD(&qgroup->groups);
 187	INIT_LIST_HEAD(&qgroup->members);
 188	INIT_LIST_HEAD(&qgroup->dirty);
 189
 190	rb_link_node(&qgroup->node, parent, p);
 191	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
 192
 193	return qgroup;
 194}
 195
 196static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
 197{
 198	struct btrfs_qgroup_list *list;
 199
 200	list_del(&qgroup->dirty);
 201	while (!list_empty(&qgroup->groups)) {
 202		list = list_first_entry(&qgroup->groups,
 203					struct btrfs_qgroup_list, next_group);
 204		list_del(&list->next_group);
 205		list_del(&list->next_member);
 206		kfree(list);
 207	}
 208
 209	while (!list_empty(&qgroup->members)) {
 210		list = list_first_entry(&qgroup->members,
 211					struct btrfs_qgroup_list, next_member);
 212		list_del(&list->next_group);
 213		list_del(&list->next_member);
 214		kfree(list);
 215	}
 216	kfree(qgroup);
 217}
 218
 219/* must be called with qgroup_lock held */
 220static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
 221{
 222	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
 223
 224	if (!qgroup)
 225		return -ENOENT;
 226
 227	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
 228	__del_qgroup_rb(qgroup);
 229	return 0;
 230}
 231
 232/* must be called with qgroup_lock held */
 233static int add_relation_rb(struct btrfs_fs_info *fs_info,
 234			   u64 memberid, u64 parentid)
 235{
 236	struct btrfs_qgroup *member;
 237	struct btrfs_qgroup *parent;
 238	struct btrfs_qgroup_list *list;
 239
 240	member = find_qgroup_rb(fs_info, memberid);
 241	parent = find_qgroup_rb(fs_info, parentid);
 242	if (!member || !parent)
 243		return -ENOENT;
 244
 245	list = kzalloc(sizeof(*list), GFP_ATOMIC);
 246	if (!list)
 247		return -ENOMEM;
 248
 249	list->group = parent;
 250	list->member = member;
 251	list_add_tail(&list->next_group, &member->groups);
 252	list_add_tail(&list->next_member, &parent->members);
 253
 254	return 0;
 255}
 256
 257/* must be called with qgroup_lock held */
 258static int del_relation_rb(struct btrfs_fs_info *fs_info,
 259			   u64 memberid, u64 parentid)
 260{
 261	struct btrfs_qgroup *member;
 262	struct btrfs_qgroup *parent;
 263	struct btrfs_qgroup_list *list;
 264
 265	member = find_qgroup_rb(fs_info, memberid);
 266	parent = find_qgroup_rb(fs_info, parentid);
 267	if (!member || !parent)
 268		return -ENOENT;
 269
 270	list_for_each_entry(list, &member->groups, next_group) {
 271		if (list->group == parent) {
 272			list_del(&list->next_group);
 273			list_del(&list->next_member);
 274			kfree(list);
 275			return 0;
 276		}
 277	}
 278	return -ENOENT;
 279}
 280
 281#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 282int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 283			       u64 rfer, u64 excl)
 284{
 285	struct btrfs_qgroup *qgroup;
 286
 287	qgroup = find_qgroup_rb(fs_info, qgroupid);
 288	if (!qgroup)
 289		return -EINVAL;
 290	if (qgroup->rfer != rfer || qgroup->excl != excl)
 291		return -EINVAL;
 292	return 0;
 293}
 294#endif
 295
 296/*
  297 * The full config is read in one go. It is only called from open_ctree().
  298 * It doesn't use any locking, as at this point we're still single-threaded.
 299 */
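/*
 * Sketch of the quota-tree items the two passes below walk (the key
 * layout is taken from the item types handled further down):
 *
 *   (0,   BTRFS_QGROUP_STATUS_KEY,   0)        global status item
 *   (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage counters
 *   (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits
 *   (src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per direction of
 *                                              a member/parent relation
 *
 * Pass 1 starts from key (0, 0, 0) and collects the status, info and
 * limit items; pass 2 starts at the relation keys and only uses the
 * objectid < offset half of each relation pair, the other direction
 * being skipped as redundant.
 */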
 300int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 301{
 302	struct btrfs_key key;
 303	struct btrfs_key found_key;
 304	struct btrfs_root *quota_root = fs_info->quota_root;
 305	struct btrfs_path *path = NULL;
 306	struct extent_buffer *l;
 307	int slot;
 308	int ret = 0;
 309	u64 flags = 0;
 310	u64 rescan_progress = 0;
 311
 312	if (!fs_info->quota_enabled)
 313		return 0;
 314
 315	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
 316	if (!fs_info->qgroup_ulist) {
 317		ret = -ENOMEM;
 318		goto out;
 319	}
 320
 321	path = btrfs_alloc_path();
 322	if (!path) {
 323		ret = -ENOMEM;
 324		goto out;
 325	}
 326
 327	/* default this to quota off, in case no status key is found */
 328	fs_info->qgroup_flags = 0;
 329
 330	/*
 331	 * pass 1: read status, all qgroup infos and limits
 332	 */
 333	key.objectid = 0;
 334	key.type = 0;
 335	key.offset = 0;
 336	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
 337	if (ret)
 338		goto out;
 339
 340	while (1) {
 341		struct btrfs_qgroup *qgroup;
 342
 343		slot = path->slots[0];
 344		l = path->nodes[0];
 345		btrfs_item_key_to_cpu(l, &found_key, slot);
 346
 347		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
 348			struct btrfs_qgroup_status_item *ptr;
 349
 350			ptr = btrfs_item_ptr(l, slot,
 351					     struct btrfs_qgroup_status_item);
 352
 353			if (btrfs_qgroup_status_version(l, ptr) !=
 354			    BTRFS_QGROUP_STATUS_VERSION) {
 355				btrfs_err(fs_info,
 356				 "old qgroup version, quota disabled");
 357				goto out;
 358			}
 359			if (btrfs_qgroup_status_generation(l, ptr) !=
 360			    fs_info->generation) {
 361				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 362				btrfs_err(fs_info,
 363					"qgroup generation mismatch, "
 364					"marked as inconsistent");
 365			}
 366			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
 367									  ptr);
 368			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
 369			goto next1;
 370		}
 371
 372		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
 373		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
 374			goto next1;
 375
 376		qgroup = find_qgroup_rb(fs_info, found_key.offset);
 377		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 378		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
 379			btrfs_err(fs_info, "inconsistent qgroup config");
 380			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 381		}
 382		if (!qgroup) {
 383			qgroup = add_qgroup_rb(fs_info, found_key.offset);
 384			if (IS_ERR(qgroup)) {
 385				ret = PTR_ERR(qgroup);
 386				goto out;
 387			}
 388		}
 389		switch (found_key.type) {
 390		case BTRFS_QGROUP_INFO_KEY: {
 391			struct btrfs_qgroup_info_item *ptr;
 392
 393			ptr = btrfs_item_ptr(l, slot,
 394					     struct btrfs_qgroup_info_item);
 395			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
 396			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
 397			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
 398			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
 399			/* generation currently unused */
 400			break;
 401		}
 402		case BTRFS_QGROUP_LIMIT_KEY: {
 403			struct btrfs_qgroup_limit_item *ptr;
 404
 405			ptr = btrfs_item_ptr(l, slot,
 406					     struct btrfs_qgroup_limit_item);
 407			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
 408			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
 409			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
 410			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
 411			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
 412			break;
 413		}
 414		}
 415next1:
 416		ret = btrfs_next_item(quota_root, path);
 417		if (ret < 0)
 418			goto out;
 419		if (ret)
 420			break;
 421	}
 422	btrfs_release_path(path);
 423
 424	/*
 425	 * pass 2: read all qgroup relations
 426	 */
 427	key.objectid = 0;
 428	key.type = BTRFS_QGROUP_RELATION_KEY;
 429	key.offset = 0;
 430	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
 431	if (ret)
 432		goto out;
 433	while (1) {
 434		slot = path->slots[0];
 435		l = path->nodes[0];
 436		btrfs_item_key_to_cpu(l, &found_key, slot);
 437
 438		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
 439			goto next2;
 440
 441		if (found_key.objectid > found_key.offset) {
 442			/* parent <- member, not needed to build config */
 443			/* FIXME should we omit the key completely? */
 444			goto next2;
 445		}
 446
 447		ret = add_relation_rb(fs_info, found_key.objectid,
 448				      found_key.offset);
 449		if (ret == -ENOENT) {
 450			btrfs_warn(fs_info,
 451				"orphan qgroup relation 0x%llx->0x%llx",
 452				found_key.objectid, found_key.offset);
 453			ret = 0;	/* ignore the error */
 454		}
 455		if (ret)
 456			goto out;
 457next2:
 458		ret = btrfs_next_item(quota_root, path);
 459		if (ret < 0)
 460			goto out;
 461		if (ret)
 462			break;
 463	}
 464out:
 465	fs_info->qgroup_flags |= flags;
 466	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
 467		fs_info->quota_enabled = 0;
 468		fs_info->pending_quota_state = 0;
 469	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
 470		   ret >= 0) {
 471		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 472	}
 473	btrfs_free_path(path);
 474
 475	if (ret < 0) {
 476		ulist_free(fs_info->qgroup_ulist);
 477		fs_info->qgroup_ulist = NULL;
 478		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 479	}
 480
 481	return ret < 0 ? ret : 0;
 482}
 483
 484/*
 485 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
  486 * the first two are in single-threaded paths. And for the third one, we have set
  487 * quota_root to NULL with qgroup_lock held beforehand, so it is safe to clean
 488 * up the in-memory structures without qgroup_lock held.
 489 */
 490void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 491{
 492	struct rb_node *n;
 493	struct btrfs_qgroup *qgroup;
 494
 495	while ((n = rb_first(&fs_info->qgroup_tree))) {
 496		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 497		rb_erase(n, &fs_info->qgroup_tree);
 498		__del_qgroup_rb(qgroup);
 499	}
 500	/*
  501	 * we call btrfs_free_qgroup_config() when unmounting the
  502	 * filesystem and when disabling quota, so we set qgroup_ulist
  503	 * to NULL here to avoid a double free.
 504	 */
 505	ulist_free(fs_info->qgroup_ulist);
 506	fs_info->qgroup_ulist = NULL;
 507}
 508
 509static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
 510				    struct btrfs_root *quota_root,
 511				    u64 src, u64 dst)
 512{
 513	int ret;
 514	struct btrfs_path *path;
 515	struct btrfs_key key;
 516
 517	path = btrfs_alloc_path();
 518	if (!path)
 519		return -ENOMEM;
 520
 521	key.objectid = src;
 522	key.type = BTRFS_QGROUP_RELATION_KEY;
 523	key.offset = dst;
 524
 525	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
 526
 527	btrfs_mark_buffer_dirty(path->nodes[0]);
 528
 529	btrfs_free_path(path);
 530	return ret;
 531}
 532
 533static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
 534				    struct btrfs_root *quota_root,
 535				    u64 src, u64 dst)
 536{
 537	int ret;
 538	struct btrfs_path *path;
 539	struct btrfs_key key;
 540
 541	path = btrfs_alloc_path();
 542	if (!path)
 543		return -ENOMEM;
 544
 545	key.objectid = src;
 546	key.type = BTRFS_QGROUP_RELATION_KEY;
 547	key.offset = dst;
 548
 549	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 550	if (ret < 0)
 551		goto out;
 552
 553	if (ret > 0) {
 554		ret = -ENOENT;
 555		goto out;
 556	}
 557
 558	ret = btrfs_del_item(trans, quota_root, path);
 559out:
 560	btrfs_free_path(path);
 561	return ret;
 562}
 563
 564static int add_qgroup_item(struct btrfs_trans_handle *trans,
 565			   struct btrfs_root *quota_root, u64 qgroupid)
 566{
 567	int ret;
 568	struct btrfs_path *path;
 569	struct btrfs_qgroup_info_item *qgroup_info;
 570	struct btrfs_qgroup_limit_item *qgroup_limit;
 571	struct extent_buffer *leaf;
 572	struct btrfs_key key;
 573
 574	if (btrfs_test_is_dummy_root(quota_root))
 575		return 0;
 576
 577	path = btrfs_alloc_path();
 578	if (!path)
 579		return -ENOMEM;
 580
 581	key.objectid = 0;
 582	key.type = BTRFS_QGROUP_INFO_KEY;
 583	key.offset = qgroupid;
 584
 585	/*
 586	 * Avoid a transaction abort by catching -EEXIST here. In that
 587	 * case, we proceed by re-initializing the existing structure
 588	 * on disk.
 589	 */
 590
 591	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 592				      sizeof(*qgroup_info));
 593	if (ret && ret != -EEXIST)
 594		goto out;
 595
 596	leaf = path->nodes[0];
 597	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
 598				 struct btrfs_qgroup_info_item);
 599	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
 600	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
 601	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
 602	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
 603	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
 604
 605	btrfs_mark_buffer_dirty(leaf);
 606
 607	btrfs_release_path(path);
 608
 609	key.type = BTRFS_QGROUP_LIMIT_KEY;
 610	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 611				      sizeof(*qgroup_limit));
 612	if (ret && ret != -EEXIST)
 613		goto out;
 614
 615	leaf = path->nodes[0];
 616	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
 617				  struct btrfs_qgroup_limit_item);
 618	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
 619	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
 620	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
 621	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
 622	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
 623
 624	btrfs_mark_buffer_dirty(leaf);
 625
 626	ret = 0;
 627out:
 628	btrfs_free_path(path);
 629	return ret;
 630}
 631
 632static int del_qgroup_item(struct btrfs_trans_handle *trans,
 633			   struct btrfs_root *quota_root, u64 qgroupid)
 634{
 635	int ret;
 636	struct btrfs_path *path;
 637	struct btrfs_key key;
 638
 639	path = btrfs_alloc_path();
 640	if (!path)
 641		return -ENOMEM;
 642
 643	key.objectid = 0;
 644	key.type = BTRFS_QGROUP_INFO_KEY;
 645	key.offset = qgroupid;
 646	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 647	if (ret < 0)
 648		goto out;
 649
 650	if (ret > 0) {
 651		ret = -ENOENT;
 652		goto out;
 653	}
 654
 655	ret = btrfs_del_item(trans, quota_root, path);
 656	if (ret)
 657		goto out;
 658
 659	btrfs_release_path(path);
 660
 661	key.type = BTRFS_QGROUP_LIMIT_KEY;
 662	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
 663	if (ret < 0)
 664		goto out;
 665
 666	if (ret > 0) {
 667		ret = -ENOENT;
 668		goto out;
 669	}
 670
 671	ret = btrfs_del_item(trans, quota_root, path);
 672
 673out:
 674	btrfs_free_path(path);
 675	return ret;
 676}
 677
 678static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 679				    struct btrfs_root *root,
 680				    struct btrfs_qgroup *qgroup)
 681{
 682	struct btrfs_path *path;
 683	struct btrfs_key key;
 684	struct extent_buffer *l;
 685	struct btrfs_qgroup_limit_item *qgroup_limit;
 686	int ret;
 687	int slot;
 688
 689	key.objectid = 0;
 690	key.type = BTRFS_QGROUP_LIMIT_KEY;
 691	key.offset = qgroup->qgroupid;
 692
 693	path = btrfs_alloc_path();
 694	if (!path)
 695		return -ENOMEM;
 696
 697	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 698	if (ret > 0)
 699		ret = -ENOENT;
 700
 701	if (ret)
 702		goto out;
 703
 704	l = path->nodes[0];
 705	slot = path->slots[0];
 706	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
 707	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
 708	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
 709	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
 710	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
 711	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
 712
 713	btrfs_mark_buffer_dirty(l);
 714
 715out:
 716	btrfs_free_path(path);
 717	return ret;
 718}
 719
 720static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
 721				   struct btrfs_root *root,
 722				   struct btrfs_qgroup *qgroup)
 723{
 724	struct btrfs_path *path;
 725	struct btrfs_key key;
 726	struct extent_buffer *l;
 727	struct btrfs_qgroup_info_item *qgroup_info;
 728	int ret;
 729	int slot;
 730
 731	if (btrfs_test_is_dummy_root(root))
 732		return 0;
 733
 734	key.objectid = 0;
 735	key.type = BTRFS_QGROUP_INFO_KEY;
 736	key.offset = qgroup->qgroupid;
 737
 738	path = btrfs_alloc_path();
 739	if (!path)
 740		return -ENOMEM;
 741
 742	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 743	if (ret > 0)
 744		ret = -ENOENT;
 745
 746	if (ret)
 747		goto out;
 748
 749	l = path->nodes[0];
 750	slot = path->slots[0];
 751	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
 752	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
 753	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
 754	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
 755	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
 756	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
 757
 758	btrfs_mark_buffer_dirty(l);
 759
 760out:
 761	btrfs_free_path(path);
 762	return ret;
 763}
 764
 765static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
 766				     struct btrfs_fs_info *fs_info,
 767				    struct btrfs_root *root)
 768{
 769	struct btrfs_path *path;
 770	struct btrfs_key key;
 771	struct extent_buffer *l;
 772	struct btrfs_qgroup_status_item *ptr;
 773	int ret;
 774	int slot;
 775
 776	key.objectid = 0;
 777	key.type = BTRFS_QGROUP_STATUS_KEY;
 778	key.offset = 0;
 779
 780	path = btrfs_alloc_path();
 781	if (!path)
 782		return -ENOMEM;
 783
 784	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 785	if (ret > 0)
 786		ret = -ENOENT;
 787
 788	if (ret)
 789		goto out;
 790
 791	l = path->nodes[0];
 792	slot = path->slots[0];
 793	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
 794	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
 795	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
 796	btrfs_set_qgroup_status_rescan(l, ptr,
 797				fs_info->qgroup_rescan_progress.objectid);
 798
 799	btrfs_mark_buffer_dirty(l);
 800
 801out:
 802	btrfs_free_path(path);
 803	return ret;
 804}
 805
 806/*
 807 * called with qgroup_lock held
 808 */
 809static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 810				  struct btrfs_root *root)
 811{
 812	struct btrfs_path *path;
 813	struct btrfs_key key;
 814	struct extent_buffer *leaf = NULL;
 815	int ret;
 816	int nr = 0;
 817
 818	path = btrfs_alloc_path();
 819	if (!path)
 820		return -ENOMEM;
 821
 822	path->leave_spinning = 1;
 823
 824	key.objectid = 0;
 825	key.offset = 0;
 826	key.type = 0;
 827
 828	while (1) {
 829		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 830		if (ret < 0)
 831			goto out;
 832		leaf = path->nodes[0];
 833		nr = btrfs_header_nritems(leaf);
 834		if (!nr)
 835			break;
 836		/*
  837		 * delete the leaves one by one
 838		 * since the whole tree is going
 839		 * to be deleted.
 840		 */
 841		path->slots[0] = 0;
 842		ret = btrfs_del_items(trans, root, path, 0, nr);
 843		if (ret)
 844			goto out;
 845
 846		btrfs_release_path(path);
 847	}
 848	ret = 0;
 849out:
 850	root->fs_info->pending_quota_state = 0;
 851	btrfs_free_path(path);
 852	return ret;
 853}
 854
 855int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 856		       struct btrfs_fs_info *fs_info)
 857{
 858	struct btrfs_root *quota_root;
 859	struct btrfs_root *tree_root = fs_info->tree_root;
 860	struct btrfs_path *path = NULL;
 861	struct btrfs_qgroup_status_item *ptr;
 862	struct extent_buffer *leaf;
 863	struct btrfs_key key;
 864	struct btrfs_key found_key;
 865	struct btrfs_qgroup *qgroup = NULL;
 866	int ret = 0;
 867	int slot;
 868
 869	mutex_lock(&fs_info->qgroup_ioctl_lock);
 870	if (fs_info->quota_root) {
 871		fs_info->pending_quota_state = 1;
 872		goto out;
 873	}
 874
 875	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
 876	if (!fs_info->qgroup_ulist) {
 877		ret = -ENOMEM;
 878		goto out;
 879	}
 880
 881	/*
 882	 * initially create the quota tree
 883	 */
 884	quota_root = btrfs_create_tree(trans, fs_info,
 885				       BTRFS_QUOTA_TREE_OBJECTID);
 886	if (IS_ERR(quota_root)) {
 887		ret =  PTR_ERR(quota_root);
 888		goto out;
 889	}
 890
 891	path = btrfs_alloc_path();
 892	if (!path) {
 893		ret = -ENOMEM;
 894		goto out_free_root;
 895	}
 896
 897	key.objectid = 0;
 898	key.type = BTRFS_QGROUP_STATUS_KEY;
 899	key.offset = 0;
 900
 901	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
 902				      sizeof(*ptr));
 903	if (ret)
 904		goto out_free_path;
 905
 906	leaf = path->nodes[0];
 907	ptr = btrfs_item_ptr(leaf, path->slots[0],
 908				 struct btrfs_qgroup_status_item);
 909	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
 910	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
 911	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
 912				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 913	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
 914	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
 915
 916	btrfs_mark_buffer_dirty(leaf);
 917
 918	key.objectid = 0;
 919	key.type = BTRFS_ROOT_REF_KEY;
 920	key.offset = 0;
 921
 922	btrfs_release_path(path);
 923	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
 924	if (ret > 0)
 925		goto out_add_root;
 926	if (ret < 0)
 927		goto out_free_path;
 928
 929
 930	while (1) {
 931		slot = path->slots[0];
 932		leaf = path->nodes[0];
 933		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 934
 935		if (found_key.type == BTRFS_ROOT_REF_KEY) {
 936			ret = add_qgroup_item(trans, quota_root,
 937					      found_key.offset);
 938			if (ret)
 939				goto out_free_path;
 940
 941			qgroup = add_qgroup_rb(fs_info, found_key.offset);
 942			if (IS_ERR(qgroup)) {
 943				ret = PTR_ERR(qgroup);
 944				goto out_free_path;
 945			}
 946		}
 947		ret = btrfs_next_item(tree_root, path);
 948		if (ret < 0)
 949			goto out_free_path;
 950		if (ret)
 951			break;
 952	}
 953
 954out_add_root:
 955	btrfs_release_path(path);
 956	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
 957	if (ret)
 958		goto out_free_path;
 959
 960	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
 961	if (IS_ERR(qgroup)) {
 962		ret = PTR_ERR(qgroup);
 963		goto out_free_path;
 964	}
 965	spin_lock(&fs_info->qgroup_lock);
 966	fs_info->quota_root = quota_root;
 967	fs_info->pending_quota_state = 1;
 968	spin_unlock(&fs_info->qgroup_lock);
 969out_free_path:
 970	btrfs_free_path(path);
 971out_free_root:
 972	if (ret) {
 973		free_extent_buffer(quota_root->node);
 974		free_extent_buffer(quota_root->commit_root);
 975		kfree(quota_root);
 976	}
 977out:
 978	if (ret) {
 979		ulist_free(fs_info->qgroup_ulist);
 980		fs_info->qgroup_ulist = NULL;
 981	}
 982	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 983	return ret;
 984}
 985
 986int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 987			struct btrfs_fs_info *fs_info)
 988{
 989	struct btrfs_root *tree_root = fs_info->tree_root;
 990	struct btrfs_root *quota_root;
 991	int ret = 0;
 992
 993	mutex_lock(&fs_info->qgroup_ioctl_lock);
 994	if (!fs_info->quota_root)
 995		goto out;
 996	fs_info->quota_enabled = 0;
 997	fs_info->pending_quota_state = 0;
 998	btrfs_qgroup_wait_for_completion(fs_info);
 999	spin_lock(&fs_info->qgroup_lock);
1000	quota_root = fs_info->quota_root;
1001	fs_info->quota_root = NULL;
1002	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1003	spin_unlock(&fs_info->qgroup_lock);
1004
1005	btrfs_free_qgroup_config(fs_info);
1006
1007	ret = btrfs_clean_quota_tree(trans, quota_root);
1008	if (ret)
1009		goto out;
1010
1011	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
1012	if (ret)
1013		goto out;
1014
1015	list_del(&quota_root->dirty_list);
1016
1017	btrfs_tree_lock(quota_root->node);
1018	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
1019	btrfs_tree_unlock(quota_root->node);
1020	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
1021
1022	free_extent_buffer(quota_root->node);
1023	free_extent_buffer(quota_root->commit_root);
1024	kfree(quota_root);
1025out:
1026	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1027	return ret;
1028}
1029
1030static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1031			 struct btrfs_qgroup *qgroup)
1032{
1033	if (list_empty(&qgroup->dirty))
1034		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1035}
1036
1037/*
1038 * The easy accounting, if we are adding/removing the only ref for an extent
 1039 * then this qgroup and all of the parent qgroups get their reference and
1040 * exclusive counts adjusted.
1041 *
1042 * Caller should hold fs_info->qgroup_lock.
1043 */
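/*
 * Example with made-up numbers: qgroup 0/257 is a member of 1/100 and a
 * 16KiB extent referenced only by root 257 is added (sign = +1).  Both
 * rfer and excl of 0/257 grow by 16KiB, and since 1/100 is reached via
 * the groups list of 0/257, its rfer and excl grow by 16KiB as well.
 * Removing the last ref of that extent (sign = -1) undoes both updates.
 */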
1044static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1045				    struct ulist *tmp, u64 ref_root,
1046				    u64 num_bytes, int sign)
1047{
1048	struct btrfs_qgroup *qgroup;
1049	struct btrfs_qgroup_list *glist;
1050	struct ulist_node *unode;
1051	struct ulist_iterator uiter;
1052	int ret = 0;
1053
1054	qgroup = find_qgroup_rb(fs_info, ref_root);
1055	if (!qgroup)
1056		goto out;
1057
1058	qgroup->rfer += sign * num_bytes;
1059	qgroup->rfer_cmpr += sign * num_bytes;
1060
1061	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1062	qgroup->excl += sign * num_bytes;
1063	qgroup->excl_cmpr += sign * num_bytes;
1064	if (sign > 0)
1065		qgroup->reserved -= num_bytes;
1066
1067	qgroup_dirty(fs_info, qgroup);
1068
1069	/* Get all of the parent groups that contain this qgroup */
1070	list_for_each_entry(glist, &qgroup->groups, next_group) {
1071		ret = ulist_add(tmp, glist->group->qgroupid,
1072				ptr_to_u64(glist->group), GFP_ATOMIC);
1073		if (ret < 0)
1074			goto out;
1075	}
1076
1077	/* Iterate all of the parents and adjust their reference counts */
1078	ULIST_ITER_INIT(&uiter);
1079	while ((unode = ulist_next(tmp, &uiter))) {
1080		qgroup = u64_to_ptr(unode->aux);
1081		qgroup->rfer += sign * num_bytes;
1082		qgroup->rfer_cmpr += sign * num_bytes;
1083		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1084		qgroup->excl += sign * num_bytes;
1085		if (sign > 0)
1086			qgroup->reserved -= num_bytes;
1087		qgroup->excl_cmpr += sign * num_bytes;
1088		qgroup_dirty(fs_info, qgroup);
1089
1090		/* Add any parents of the parents */
1091		list_for_each_entry(glist, &qgroup->groups, next_group) {
1092			ret = ulist_add(tmp, glist->group->qgroupid,
1093					ptr_to_u64(glist->group), GFP_ATOMIC);
1094			if (ret < 0)
1095				goto out;
1096		}
1097	}
1098	ret = 0;
1099out:
1100	return ret;
1101}
1102
1103
1104/*
1105 * Quick path for updating qgroup with only excl refs.
1106 *
 1107 * In that case, just updating all parents is enough.
 1108 * Otherwise we need to do a full rescan.
1109 * Caller should also hold fs_info->qgroup_lock.
1110 *
 1111 * Return 0 for a quick update, return >0 if a full rescan is needed
 1112 * (and the INCONSISTENT flag is set).
 1113 * Return <0 for other errors.
1114 */
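/*
 * Example with made-up numbers: if qgroup 0/257 has rfer == excl == 1MiB,
 * everything it references is exclusive to it, so assigning it to parent
 * 1/100 only needs __qgroup_excl_accounting() to add 1MiB to the parent's
 * counters (sign = +1), and removing the relation subtracts it again.
 * If rfer != excl, some of its extents may be shared, so the quick path
 * returns 1 and the status is marked inconsistent until a rescan runs.
 */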
1115static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1116				   struct ulist *tmp, u64 src, u64 dst,
1117				   int sign)
1118{
1119	struct btrfs_qgroup *qgroup;
1120	int ret = 1;
1121	int err = 0;
1122
1123	qgroup = find_qgroup_rb(fs_info, src);
1124	if (!qgroup)
1125		goto out;
1126	if (qgroup->excl == qgroup->rfer) {
1127		ret = 0;
1128		err = __qgroup_excl_accounting(fs_info, tmp, dst,
1129					       qgroup->excl, sign);
1130		if (err < 0) {
1131			ret = err;
1132			goto out;
1133		}
1134	}
1135out:
1136	if (ret)
1137		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1138	return ret;
1139}
1140
1141int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
1142			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1143{
1144	struct btrfs_root *quota_root;
1145	struct btrfs_qgroup *parent;
1146	struct btrfs_qgroup *member;
1147	struct btrfs_qgroup_list *list;
1148	struct ulist *tmp;
1149	int ret = 0;
1150
1151	/* Check the level of src and dst first */
1152	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1153		return -EINVAL;
1154
1155	tmp = ulist_alloc(GFP_NOFS);
1156	if (!tmp)
1157		return -ENOMEM;
1158
1159	mutex_lock(&fs_info->qgroup_ioctl_lock);
1160	quota_root = fs_info->quota_root;
1161	if (!quota_root) {
1162		ret = -EINVAL;
1163		goto out;
1164	}
1165	member = find_qgroup_rb(fs_info, src);
1166	parent = find_qgroup_rb(fs_info, dst);
1167	if (!member || !parent) {
1168		ret = -EINVAL;
1169		goto out;
1170	}
1171
 1172	/* check if such a qgroup relation already exists */
1173	list_for_each_entry(list, &member->groups, next_group) {
1174		if (list->group == parent) {
1175			ret = -EEXIST;
1176			goto out;
1177		}
1178	}
1179
1180	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
1181	if (ret)
1182		goto out;
1183
1184	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
1185	if (ret) {
1186		del_qgroup_relation_item(trans, quota_root, src, dst);
1187		goto out;
1188	}
1189
1190	spin_lock(&fs_info->qgroup_lock);
1191	ret = add_relation_rb(quota_root->fs_info, src, dst);
1192	if (ret < 0) {
1193		spin_unlock(&fs_info->qgroup_lock);
1194		goto out;
1195	}
1196	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
1197	spin_unlock(&fs_info->qgroup_lock);
1198out:
1199	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1200	ulist_free(tmp);
1201	return ret;
1202}
1203
1204int __del_qgroup_relation(struct btrfs_trans_handle *trans,
1205			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1206{
1207	struct btrfs_root *quota_root;
1208	struct btrfs_qgroup *parent;
1209	struct btrfs_qgroup *member;
1210	struct btrfs_qgroup_list *list;
1211	struct ulist *tmp;
1212	int ret = 0;
1213	int err;
1214
1215	tmp = ulist_alloc(GFP_NOFS);
1216	if (!tmp)
1217		return -ENOMEM;
1218
1219	quota_root = fs_info->quota_root;
1220	if (!quota_root) {
1221		ret = -EINVAL;
1222		goto out;
1223	}
1224
1225	member = find_qgroup_rb(fs_info, src);
1226	parent = find_qgroup_rb(fs_info, dst);
1227	if (!member || !parent) {
1228		ret = -EINVAL;
1229		goto out;
1230	}
1231
 1232	/* check if such a qgroup relation already exists */
1233	list_for_each_entry(list, &member->groups, next_group) {
1234		if (list->group == parent)
1235			goto exist;
1236	}
1237	ret = -ENOENT;
1238	goto out;
1239exist:
1240	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1241	err = del_qgroup_relation_item(trans, quota_root, dst, src);
1242	if (err && !ret)
1243		ret = err;
1244
1245	spin_lock(&fs_info->qgroup_lock);
1246	del_relation_rb(fs_info, src, dst);
1247	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1248	spin_unlock(&fs_info->qgroup_lock);
1249out:
1250	ulist_free(tmp);
1251	return ret;
1252}
1253
1254int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1255			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1256{
1257	int ret = 0;
1258
1259	mutex_lock(&fs_info->qgroup_ioctl_lock);
1260	ret = __del_qgroup_relation(trans, fs_info, src, dst);
1261	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1262
1263	return ret;
1264}
1265
1266int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1267			struct btrfs_fs_info *fs_info, u64 qgroupid)
1268{
1269	struct btrfs_root *quota_root;
1270	struct btrfs_qgroup *qgroup;
1271	int ret = 0;
1272
1273	mutex_lock(&fs_info->qgroup_ioctl_lock);
1274	quota_root = fs_info->quota_root;
1275	if (!quota_root) {
1276		ret = -EINVAL;
1277		goto out;
1278	}
1279	qgroup = find_qgroup_rb(fs_info, qgroupid);
1280	if (qgroup) {
1281		ret = -EEXIST;
1282		goto out;
1283	}
1284
1285	ret = add_qgroup_item(trans, quota_root, qgroupid);
1286	if (ret)
1287		goto out;
1288
1289	spin_lock(&fs_info->qgroup_lock);
1290	qgroup = add_qgroup_rb(fs_info, qgroupid);
1291	spin_unlock(&fs_info->qgroup_lock);
1292
1293	if (IS_ERR(qgroup))
1294		ret = PTR_ERR(qgroup);
1295out:
1296	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1297	return ret;
1298}
1299
1300int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1301			struct btrfs_fs_info *fs_info, u64 qgroupid)
1302{
1303	struct btrfs_root *quota_root;
1304	struct btrfs_qgroup *qgroup;
1305	struct btrfs_qgroup_list *list;
1306	int ret = 0;
1307
1308	mutex_lock(&fs_info->qgroup_ioctl_lock);
1309	quota_root = fs_info->quota_root;
1310	if (!quota_root) {
1311		ret = -EINVAL;
1312		goto out;
1313	}
1314
1315	qgroup = find_qgroup_rb(fs_info, qgroupid);
1316	if (!qgroup) {
1317		ret = -ENOENT;
1318		goto out;
1319	} else {
1320		/* check if there are no children of this qgroup */
1321		if (!list_empty(&qgroup->members)) {
1322			ret = -EBUSY;
1323			goto out;
1324		}
1325	}
1326	ret = del_qgroup_item(trans, quota_root, qgroupid);
1327
1328	while (!list_empty(&qgroup->groups)) {
1329		list = list_first_entry(&qgroup->groups,
1330					struct btrfs_qgroup_list, next_group);
1331		ret = __del_qgroup_relation(trans, fs_info,
1332					   qgroupid,
1333					   list->group->qgroupid);
1334		if (ret)
1335			goto out;
1336	}
1337
1338	spin_lock(&fs_info->qgroup_lock);
1339	del_qgroup_rb(quota_root->fs_info, qgroupid);
1340	spin_unlock(&fs_info->qgroup_lock);
1341out:
1342	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1343	return ret;
1344}
1345
1346int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1347		       struct btrfs_fs_info *fs_info, u64 qgroupid,
1348		       struct btrfs_qgroup_limit *limit)
1349{
1350	struct btrfs_root *quota_root;
1351	struct btrfs_qgroup *qgroup;
1352	int ret = 0;
1353	/* Sometimes we would want to clear the limit on this qgroup.
 1354	 * To meet this requirement, we treat -1 as a special value
 1355	 * which tells the kernel to clear the limit on this qgroup.
1356	 */
1357	const u64 CLEAR_VALUE = -1;
1358
1359	mutex_lock(&fs_info->qgroup_ioctl_lock);
1360	quota_root = fs_info->quota_root;
1361	if (!quota_root) {
1362		ret = -EINVAL;
1363		goto out;
1364	}
1365
1366	qgroup = find_qgroup_rb(fs_info, qgroupid);
1367	if (!qgroup) {
1368		ret = -ENOENT;
1369		goto out;
1370	}
1371
1372	spin_lock(&fs_info->qgroup_lock);
1373	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1374		if (limit->max_rfer == CLEAR_VALUE) {
1375			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1376			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1377			qgroup->max_rfer = 0;
1378		} else {
1379			qgroup->max_rfer = limit->max_rfer;
1380		}
1381	}
1382	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1383		if (limit->max_excl == CLEAR_VALUE) {
1384			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1385			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1386			qgroup->max_excl = 0;
1387		} else {
1388			qgroup->max_excl = limit->max_excl;
1389		}
1390	}
1391	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1392		if (limit->rsv_rfer == CLEAR_VALUE) {
1393			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1394			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1395			qgroup->rsv_rfer = 0;
1396		} else {
1397			qgroup->rsv_rfer = limit->rsv_rfer;
1398		}
1399	}
1400	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1401		if (limit->rsv_excl == CLEAR_VALUE) {
1402			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1403			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1404			qgroup->rsv_excl = 0;
1405		} else {
1406			qgroup->rsv_excl = limit->rsv_excl;
1407		}
1408	}
1409	qgroup->lim_flags |= limit->flags;
1410
1411	spin_unlock(&fs_info->qgroup_lock);
1412
1413	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1414	if (ret) {
1415		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1416		btrfs_info(fs_info, "unable to update quota limit for %llu",
1417		       qgroupid);
1418	}
1419
1420out:
1421	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1422	return ret;
1423}
1424
1425int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
1426					 struct btrfs_fs_info *fs_info)
1427{
1428	struct btrfs_qgroup_extent_record *record;
1429	struct btrfs_delayed_ref_root *delayed_refs;
1430	struct rb_node *node;
1431	u64 qgroup_to_skip;
1432	int ret = 0;
1433
1434	delayed_refs = &trans->transaction->delayed_refs;
1435	qgroup_to_skip = delayed_refs->qgroup_to_skip;
1436
1437	/*
 1438	 * No need to take the lock, since this function will only be called in
 1439	 * btrfs_commit_transaction().
1440	 */
1441	node = rb_first(&delayed_refs->dirty_extent_root);
1442	while (node) {
1443		record = rb_entry(node, struct btrfs_qgroup_extent_record,
1444				  node);
1445		ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
1446					   &record->old_roots);
1447		if (ret < 0)
1448			break;
1449		if (qgroup_to_skip)
1450			ulist_del(record->old_roots, qgroup_to_skip, 0);
1451		node = rb_next(node);
1452	}
1453	return ret;
1454}
1455
1456struct btrfs_qgroup_extent_record
1457*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
1458				  struct btrfs_qgroup_extent_record *record)
1459{
1460	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1461	struct rb_node *parent_node = NULL;
1462	struct btrfs_qgroup_extent_record *entry;
1463	u64 bytenr = record->bytenr;
1464
1465	assert_spin_locked(&delayed_refs->lock);
1466	trace_btrfs_qgroup_insert_dirty_extent(record);
1467
1468	while (*p) {
1469		parent_node = *p;
1470		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1471				 node);
1472		if (bytenr < entry->bytenr)
1473			p = &(*p)->rb_left;
1474		else if (bytenr > entry->bytenr)
1475			p = &(*p)->rb_right;
1476		else
1477			return entry;
1478	}
1479
1480	rb_link_node(&record->node, parent_node, p);
1481	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1482	return NULL;
1483}
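
/*
 * Usage sketch, inferred from the return convention above rather than
 * from any particular caller: with delayed_refs->lock held, the caller
 * passes a freshly allocated record; a NULL return means the record was
 * linked into dirty_extent_root, while a non-NULL return is the entry
 * that already exists for the same bytenr, so the new record was not
 * inserted and the caller is expected to dispose of it.
 */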
1484
1485#define UPDATE_NEW	0
1486#define UPDATE_OLD	1
1487/*
 1488 * Walk all of the roots that point to the bytenr and adjust their refcnts.
1489 */
1490static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1491				struct ulist *roots, struct ulist *tmp,
1492				struct ulist *qgroups, u64 seq, int update_old)
1493{
1494	struct ulist_node *unode;
1495	struct ulist_iterator uiter;
1496	struct ulist_node *tmp_unode;
1497	struct ulist_iterator tmp_uiter;
1498	struct btrfs_qgroup *qg;
1499	int ret = 0;
1500
1501	if (!roots)
1502		return 0;
1503	ULIST_ITER_INIT(&uiter);
1504	while ((unode = ulist_next(roots, &uiter))) {
1505		qg = find_qgroup_rb(fs_info, unode->val);
1506		if (!qg)
1507			continue;
1508
1509		ulist_reinit(tmp);
1510		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1511				GFP_ATOMIC);
1512		if (ret < 0)
1513			return ret;
1514		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
1515		if (ret < 0)
1516			return ret;
1517		ULIST_ITER_INIT(&tmp_uiter);
1518		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1519			struct btrfs_qgroup_list *glist;
1520
1521			qg = u64_to_ptr(tmp_unode->aux);
1522			if (update_old)
1523				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1524			else
1525				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1526			list_for_each_entry(glist, &qg->groups, next_group) {
1527				ret = ulist_add(qgroups, glist->group->qgroupid,
1528						ptr_to_u64(glist->group),
1529						GFP_ATOMIC);
1530				if (ret < 0)
1531					return ret;
1532				ret = ulist_add(tmp, glist->group->qgroupid,
1533						ptr_to_u64(glist->group),
1534						GFP_ATOMIC);
1535				if (ret < 0)
1536					return ret;
1537			}
1538		}
1539	}
1540	return 0;
1541}
1542
1543/*
1544 * Update qgroup rfer/excl counters.
 1545 * Rfer update is easy, the code can explain itself.
1546 *
 1547 * Excl update is tricky, the update is split into 2 parts.
1548 * Part 1: Possible exclusive <-> sharing detect:
1549 *	|	A	|	!A	|
1550 *  -------------------------------------
1551 *  B	|	*	|	-	|
1552 *  -------------------------------------
1553 *  !B	|	+	|	**	|
1554 *  -------------------------------------
1555 *
1556 * Conditions:
1557 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
1558 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
1559 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 1560 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
1561 *
1562 * Results:
1563 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
1564 * *: Definitely not changed.		**: Possible unchanged.
1565 *
1566 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
1567 *
1568 * To make the logic clear, we first use condition A and B to split
1569 * combination into 4 results.
1570 *
 1571 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 1572 * those cases only one variant may be 0.
1573 *
 1574 * Lastly, check result **, since there are 2 variants that may be 0, split it
 1575 * again (2x2).
 1576 * But this time we don't need to consider other things; the code and logic
 1577 * are easy to follow now.
1578 */
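/*
 * Worked example with made-up numbers: a 16KiB extent was referenced by
 * roots {257, 258} and is now referenced by {257} only, so
 * nr_old_roots = 2 and nr_new_roots = 1.
 *
 * For qgroup 0/257: cur_old_count = 1, cur_new_count = 1.  Rfer is
 * unchanged; since cur_old_count < nr_old_roots (shared before) and
 * cur_new_count == nr_new_roots (exclusive now), excl += 16KiB.
 *
 * For qgroup 0/258: cur_old_count = 1, cur_new_count = 0, so
 * rfer -= 16KiB; excl is unchanged because the extent was shared, not
 * exclusive, even before the change.
 */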
1579static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1580				  struct ulist *qgroups,
1581				  u64 nr_old_roots,
1582				  u64 nr_new_roots,
1583				  u64 num_bytes, u64 seq)
1584{
1585	struct ulist_node *unode;
1586	struct ulist_iterator uiter;
1587	struct btrfs_qgroup *qg;
1588	u64 cur_new_count, cur_old_count;
1589
1590	ULIST_ITER_INIT(&uiter);
1591	while ((unode = ulist_next(qgroups, &uiter))) {
1592		bool dirty = false;
1593
1594		qg = u64_to_ptr(unode->aux);
1595		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1596		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1597
1598		trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
1599					     cur_new_count);
1600
1601		/* Rfer update part */
1602		if (cur_old_count == 0 && cur_new_count > 0) {
1603			qg->rfer += num_bytes;
1604			qg->rfer_cmpr += num_bytes;
1605			dirty = true;
1606		}
1607		if (cur_old_count > 0 && cur_new_count == 0) {
1608			qg->rfer -= num_bytes;
1609			qg->rfer_cmpr -= num_bytes;
1610			dirty = true;
1611		}
1612
1613		/* Excl update part */
1614		/* Exclusive/none -> shared case */
1615		if (cur_old_count == nr_old_roots &&
1616		    cur_new_count < nr_new_roots) {
1617			/* Exclusive -> shared */
1618			if (cur_old_count != 0) {
1619				qg->excl -= num_bytes;
1620				qg->excl_cmpr -= num_bytes;
1621				dirty = true;
1622			}
1623		}
1624
1625		/* Shared -> exclusive/none case */
1626		if (cur_old_count < nr_old_roots &&
1627		    cur_new_count == nr_new_roots) {
1628			/* Shared->exclusive */
1629			if (cur_new_count != 0) {
1630				qg->excl += num_bytes;
1631				qg->excl_cmpr += num_bytes;
1632				dirty = true;
1633			}
1634		}
1635
1636		/* Exclusive/none -> exclusive/none case */
1637		if (cur_old_count == nr_old_roots &&
1638		    cur_new_count == nr_new_roots) {
1639			if (cur_old_count == 0) {
1640				/* None -> exclusive/none */
1641
1642				if (cur_new_count != 0) {
1643					/* None -> exclusive */
1644					qg->excl += num_bytes;
1645					qg->excl_cmpr += num_bytes;
1646					dirty = true;
1647				}
1648				/* None -> none, nothing changed */
1649			} else {
1650				/* Exclusive -> exclusive/none */
1651
1652				if (cur_new_count == 0) {
1653					/* Exclusive -> none */
1654					qg->excl -= num_bytes;
1655					qg->excl_cmpr -= num_bytes;
1656					dirty = true;
1657				}
1658				/* Exclusive -> exclusive, nothing changed */
1659			}
1660		}
1661
1662		if (dirty)
1663			qgroup_dirty(fs_info, qg);
1664	}
1665	return 0;
1666}
1667
1668int
1669btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1670			    struct btrfs_fs_info *fs_info,
1671			    u64 bytenr, u64 num_bytes,
1672			    struct ulist *old_roots, struct ulist *new_roots)
1673{
1674	struct ulist *qgroups = NULL;
1675	struct ulist *tmp = NULL;
1676	u64 seq;
1677	u64 nr_new_roots = 0;
1678	u64 nr_old_roots = 0;
1679	int ret = 0;
1680
1681	if (new_roots)
1682		nr_new_roots = new_roots->nnodes;
1683	if (old_roots)
1684		nr_old_roots = old_roots->nnodes;
1685
1686	if (!fs_info->quota_enabled)
1687		goto out_free;
1688	BUG_ON(!fs_info->quota_root);
1689
1690	trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
1691					  nr_new_roots);
1692
1693	qgroups = ulist_alloc(GFP_NOFS);
1694	if (!qgroups) {
1695		ret = -ENOMEM;
1696		goto out_free;
1697	}
1698	tmp = ulist_alloc(GFP_NOFS);
1699	if (!tmp) {
1700		ret = -ENOMEM;
1701		goto out_free;
1702	}
1703
1704	mutex_lock(&fs_info->qgroup_rescan_lock);
1705	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
1706		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
1707			mutex_unlock(&fs_info->qgroup_rescan_lock);
1708			ret = 0;
1709			goto out_free;
1710		}
1711	}
1712	mutex_unlock(&fs_info->qgroup_rescan_lock);
1713
1714	spin_lock(&fs_info->qgroup_lock);
1715	seq = fs_info->qgroup_seq;
1716
1717	/* Update old refcnts using old_roots */
1718	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
1719				   UPDATE_OLD);
1720	if (ret < 0)
1721		goto out;
1722
1723	/* Update new refcnts using new_roots */
1724	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
1725				   UPDATE_NEW);
1726	if (ret < 0)
1727		goto out;
1728
1729	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
1730			       num_bytes, seq);
1731
1732	/*
1733	 * Bump qgroup_seq to avoid seq overlap
1734	 */
1735	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
1736out:
1737	spin_unlock(&fs_info->qgroup_lock);
1738out_free:
1739	ulist_free(tmp);
1740	ulist_free(qgroups);
1741	ulist_free(old_roots);
1742	ulist_free(new_roots);
1743	return ret;
1744}
1745
1746int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
1747				 struct btrfs_fs_info *fs_info)
1748{
1749	struct btrfs_qgroup_extent_record *record;
1750	struct btrfs_delayed_ref_root *delayed_refs;
1751	struct ulist *new_roots = NULL;
1752	struct rb_node *node;
1753	u64 qgroup_to_skip;
1754	int ret = 0;
1755
1756	delayed_refs = &trans->transaction->delayed_refs;
1757	qgroup_to_skip = delayed_refs->qgroup_to_skip;
1758	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
1759		record = rb_entry(node, struct btrfs_qgroup_extent_record,
1760				  node);
1761
1762		trace_btrfs_qgroup_account_extents(record);
1763
1764		if (!ret) {
1765			/*
1766			 * Use (u64)-1 as time_seq to do special search, which
 1767			 * doesn't lock the tree or delayed_refs and searches the current
1768			 * root. It's safe inside commit_transaction().
1769			 */
1770			ret = btrfs_find_all_roots(trans, fs_info,
1771					record->bytenr, (u64)-1, &new_roots);
1772			if (ret < 0)
1773				goto cleanup;
1774			if (qgroup_to_skip)
1775				ulist_del(new_roots, qgroup_to_skip, 0);
1776			ret = btrfs_qgroup_account_extent(trans, fs_info,
1777					record->bytenr, record->num_bytes,
1778					record->old_roots, new_roots);
1779			record->old_roots = NULL;
1780			new_roots = NULL;
1781		}
1782cleanup:
1783		ulist_free(record->old_roots);
1784		ulist_free(new_roots);
1785		new_roots = NULL;
1786		rb_erase(node, &delayed_refs->dirty_extent_root);
1787		kfree(record);
1788
1789	}
1790	return ret;
1791}
1792
1793/*
1794 * called from commit_transaction. Writes all changed qgroups to disk.
1795 */
1796int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1797		      struct btrfs_fs_info *fs_info)
1798{
1799	struct btrfs_root *quota_root = fs_info->quota_root;
1800	int ret = 0;
1801	int start_rescan_worker = 0;
1802
1803	if (!quota_root)
1804		goto out;
1805
1806	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
1807		start_rescan_worker = 1;
1808
1809	fs_info->quota_enabled = fs_info->pending_quota_state;
1810
1811	spin_lock(&fs_info->qgroup_lock);
1812	while (!list_empty(&fs_info->dirty_qgroups)) {
1813		struct btrfs_qgroup *qgroup;
1814		qgroup = list_first_entry(&fs_info->dirty_qgroups,
1815					  struct btrfs_qgroup, dirty);
1816		list_del_init(&qgroup->dirty);
1817		spin_unlock(&fs_info->qgroup_lock);
1818		ret = update_qgroup_info_item(trans, quota_root, qgroup);
1819		if (ret)
1820			fs_info->qgroup_flags |=
1821					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1822		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1823		if (ret)
1824			fs_info->qgroup_flags |=
1825					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1826		spin_lock(&fs_info->qgroup_lock);
1827	}
1828	if (fs_info->quota_enabled)
1829		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1830	else
1831		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1832	spin_unlock(&fs_info->qgroup_lock);
1833
1834	ret = update_qgroup_status_item(trans, fs_info, quota_root);
1835	if (ret)
1836		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1837
1838	if (!ret && start_rescan_worker) {
1839		ret = qgroup_rescan_init(fs_info, 0, 1);
1840		if (!ret) {
1841			qgroup_rescan_zero_tracking(fs_info);
1842			btrfs_queue_work(fs_info->qgroup_rescan_workers,
1843					 &fs_info->qgroup_rescan_work);
1844		}
1845		ret = 0;
1846	}
1847
1848out:
1849
1850	return ret;
1851}
1852
1853/*
 1854 * Copy the accounting information between qgroups. This is necessary
1855 * when a snapshot or a subvolume is created. Throwing an error will
1856 * cause a transaction abort so we take extra care here to only error
1857 * when a readonly fs is a reasonable outcome.
1858 */
1859int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1860			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1861			 struct btrfs_qgroup_inherit *inherit)
1862{
1863	int ret = 0;
1864	int i;
1865	u64 *i_qgroups;
1866	struct btrfs_root *quota_root = fs_info->quota_root;
1867	struct btrfs_qgroup *srcgroup;
1868	struct btrfs_qgroup *dstgroup;
1869	u32 level_size = 0;
1870	u64 nums;
1871
1872	mutex_lock(&fs_info->qgroup_ioctl_lock);
1873	if (!fs_info->quota_enabled)
1874		goto out;
1875
1876	if (!quota_root) {
1877		ret = -EINVAL;
1878		goto out;
1879	}
1880
1881	if (inherit) {
1882		i_qgroups = (u64 *)(inherit + 1);
1883		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1884		       2 * inherit->num_excl_copies;
1885		for (i = 0; i < nums; ++i) {
1886			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1887
1888			/*
1889			 * Zero out invalid groups so we can ignore
1890			 * them later.
1891			 */
1892			if (!srcgroup ||
1893			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
1894				*i_qgroups = 0ULL;
1895
1896			++i_qgroups;
1897		}
1898	}
1899
1900	/*
1901	 * create a tracking group for the subvol itself
1902	 */
1903	ret = add_qgroup_item(trans, quota_root, objectid);
1904	if (ret)
1905		goto out;
1906
1907	if (srcid) {
1908		struct btrfs_root *srcroot;
1909		struct btrfs_key srckey;
1910
1911		srckey.objectid = srcid;
1912		srckey.type = BTRFS_ROOT_ITEM_KEY;
1913		srckey.offset = (u64)-1;
1914		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1915		if (IS_ERR(srcroot)) {
1916			ret = PTR_ERR(srcroot);
1917			goto out;
1918		}
1919
1920		rcu_read_lock();
1921		level_size = srcroot->nodesize;
1922		rcu_read_unlock();
1923	}
1924
1925	/*
1926	 * add qgroup to all inherited groups
1927	 */
1928	if (inherit) {
1929		i_qgroups = (u64 *)(inherit + 1);
1930		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
1931			if (*i_qgroups == 0)
1932				continue;
1933			ret = add_qgroup_relation_item(trans, quota_root,
1934						       objectid, *i_qgroups);
1935			if (ret && ret != -EEXIST)
1936				goto out;
1937			ret = add_qgroup_relation_item(trans, quota_root,
1938						       *i_qgroups, objectid);
1939			if (ret && ret != -EEXIST)
1940				goto out;
1941		}
1942		ret = 0;
1943	}
1944
1945
1946	spin_lock(&fs_info->qgroup_lock);
1947
1948	dstgroup = add_qgroup_rb(fs_info, objectid);
1949	if (IS_ERR(dstgroup)) {
1950		ret = PTR_ERR(dstgroup);
1951		goto unlock;
1952	}
1953
1954	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1955		dstgroup->lim_flags = inherit->lim.flags;
1956		dstgroup->max_rfer = inherit->lim.max_rfer;
1957		dstgroup->max_excl = inherit->lim.max_excl;
1958		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
1959		dstgroup->rsv_excl = inherit->lim.rsv_excl;
1960
1961		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
1962		if (ret) {
1963			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1964			btrfs_info(fs_info, "unable to update quota limit for %llu",
1965			       dstgroup->qgroupid);
1966			goto unlock;
1967		}
1968	}
1969
1970	if (srcid) {
1971		srcgroup = find_qgroup_rb(fs_info, srcid);
1972		if (!srcgroup)
1973			goto unlock;
1974
1975		/*
1976		 * We call inherit after we clone the root in order to make sure
1977		 * our counts don't go crazy, so at this point the only
1978		 * difference between the two roots should be the root node.
1979		 */
1980		dstgroup->rfer = srcgroup->rfer;
1981		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
1982		dstgroup->excl = level_size;
1983		dstgroup->excl_cmpr = level_size;
1984		srcgroup->excl = level_size;
1985		srcgroup->excl_cmpr = level_size;
1986
1987		/* inherit the limit info */
1988		dstgroup->lim_flags = srcgroup->lim_flags;
1989		dstgroup->max_rfer = srcgroup->max_rfer;
1990		dstgroup->max_excl = srcgroup->max_excl;
1991		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
1992		dstgroup->rsv_excl = srcgroup->rsv_excl;
1993
1994		qgroup_dirty(fs_info, dstgroup);
1995		qgroup_dirty(fs_info, srcgroup);
1996	}
1997
1998	if (!inherit)
1999		goto unlock;
2000
2001	i_qgroups = (u64 *)(inherit + 1);
2002	for (i = 0; i < inherit->num_qgroups; ++i) {
2003		if (*i_qgroups) {
2004			ret = add_relation_rb(fs_info, objectid,
2005					      *i_qgroups);
2006			if (ret)
2007				goto unlock;
2008		}
2009		++i_qgroups;
2010	}
2011
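	/*
	 * Seed counters from the requested (src, dst) copy pairs: referenced
	 * counters are copied minus one node, exclusive counters plus one
	 * node, presumably to account for the root node that differs between
	 * the two trees.
	 */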
2012	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2013		struct btrfs_qgroup *src;
2014		struct btrfs_qgroup *dst;
2015
2016		if (!i_qgroups[0] || !i_qgroups[1])
2017			continue;
2018
2019		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2020		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2021
2022		if (!src || !dst) {
2023			ret = -EINVAL;
2024			goto unlock;
2025		}
2026
2027		dst->rfer = src->rfer - level_size;
2028		dst->rfer_cmpr = src->rfer_cmpr - level_size;
2029	}
2030	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2031		struct btrfs_qgroup *src;
2032		struct btrfs_qgroup *dst;
2033
2034		if (!i_qgroups[0] || !i_qgroups[1])
2035			continue;
2036
2037		src = find_qgroup_rb(fs_info, i_qgroups[0]);
2038		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2039
2040		if (!src || !dst) {
2041			ret = -EINVAL;
2042			goto unlock;
2043		}
2044
2045		dst->excl = src->excl + level_size;
2046		dst->excl_cmpr = src->excl_cmpr + level_size;
2047	}
2048
2049unlock:
2050	spin_unlock(&fs_info->qgroup_lock);
2051out:
2052	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2053	return ret;
2054}
2055
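/*
 * Reserve @num_bytes against the qgroup of @root and against every qgroup
 * above it in the hierarchy.  Fails with -EDQUOT if the reservation would
 * push any of them over a configured max_rfer/max_excl limit.
 */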
2056static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2057{
2058	struct btrfs_root *quota_root;
2059	struct btrfs_qgroup *qgroup;
2060	struct btrfs_fs_info *fs_info = root->fs_info;
2061	u64 ref_root = root->root_key.objectid;
2062	int ret = 0;
2063	struct ulist_node *unode;
2064	struct ulist_iterator uiter;
2065
2066	if (!is_fstree(ref_root))
2067		return 0;
2068
2069	if (num_bytes == 0)
2070		return 0;
2071
2072	spin_lock(&fs_info->qgroup_lock);
2073	quota_root = fs_info->quota_root;
2074	if (!quota_root)
2075		goto out;
2076
2077	qgroup = find_qgroup_rb(fs_info, ref_root);
2078	if (!qgroup)
2079		goto out;
2080
2081	/*
2082	 * In a first step, walk the qgroup and all of its ancestors and check
2083	 * whether any limit would be exceeded.
2084	 */
2085	ulist_reinit(fs_info->qgroup_ulist);
2086	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2087			(uintptr_t)qgroup, GFP_ATOMIC);
2088	if (ret < 0)
2089		goto out;
2090	ULIST_ITER_INIT(&uiter);
2091	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2092		struct btrfs_qgroup *qg;
2093		struct btrfs_qgroup_list *glist;
2094
2095		qg = u64_to_ptr(unode->aux);
2096
2097		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2098		    qg->reserved + (s64)qg->rfer + num_bytes >
2099		    qg->max_rfer) {
2100			ret = -EDQUOT;
2101			goto out;
2102		}
2103
2104		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2105		    qg->reserved + (s64)qg->excl + num_bytes >
2106		    qg->max_excl) {
2107			ret = -EDQUOT;
2108			goto out;
2109		}
2110
2111		list_for_each_entry(glist, &qg->groups, next_group) {
2112			ret = ulist_add(fs_info->qgroup_ulist,
2113					glist->group->qgroupid,
2114					(uintptr_t)glist->group, GFP_ATOMIC);
2115			if (ret < 0)
2116				goto out;
2117		}
2118	}
2119	ret = 0;
2120	/*
2121	 * no limits exceeded, now record the reservation into all qgroups
2122	 */
2123	ULIST_ITER_INIT(&uiter);
2124	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2125		struct btrfs_qgroup *qg;
2126
2127		qg = u64_to_ptr(unode->aux);
2128
2129		qg->reserved += num_bytes;
2130	}
2131
2132out:
2133	spin_unlock(&fs_info->qgroup_lock);
2134	return ret;
2135}
2136
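/*
 * Undo a previous reservation: subtract @num_bytes from the reserved count
 * of the qgroup that owns @ref_root and from all of its ancestor qgroups.
 */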
2137void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2138			       u64 ref_root, u64 num_bytes)
2139{
2140	struct btrfs_root *quota_root;
2141	struct btrfs_qgroup *qgroup;
2142	struct ulist_node *unode;
2143	struct ulist_iterator uiter;
2144	int ret = 0;
2145
2146	if (!is_fstree(ref_root))
2147		return;
2148
2149	if (num_bytes == 0)
2150		return;
2151
2152	spin_lock(&fs_info->qgroup_lock);
2153
2154	quota_root = fs_info->quota_root;
2155	if (!quota_root)
2156		goto out;
2157
2158	qgroup = find_qgroup_rb(fs_info, ref_root);
2159	if (!qgroup)
2160		goto out;
2161
2162	ulist_reinit(fs_info->qgroup_ulist);
2163	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2164			(uintptr_t)qgroup, GFP_ATOMIC);
2165	if (ret < 0)
2166		goto out;
2167	ULIST_ITER_INIT(&uiter);
2168	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2169		struct btrfs_qgroup *qg;
2170		struct btrfs_qgroup_list *glist;
2171
2172		qg = u64_to_ptr(unode->aux);
2173
2174		qg->reserved -= num_bytes;
2175
2176		list_for_each_entry(glist, &qg->groups, next_group) {
2177			ret = ulist_add(fs_info->qgroup_ulist,
2178					glist->group->qgroupid,
2179					(uintptr_t)glist->group, GFP_ATOMIC);
2180			if (ret < 0)
2181				goto out;
2182		}
2183	}
2184
2185out:
2186	spin_unlock(&fs_info->qgroup_lock);
2187}
2188
2189static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
2190{
2191	btrfs_qgroup_free_refroot(root->fs_info, root->objectid, num_bytes);
2192}

2194void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2195{
2196	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2197		return;
2198	btrfs_err(trans->root->fs_info,
2199		"qgroups not uptodate in trans handle %p:  list is%s empty, "
2200		"seq is %#x.%x",
2201		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2202		(u32)(trans->delayed_ref_elem.seq >> 32),
2203		(u32)trans->delayed_ref_elem.seq);
2204	BUG();
2205}
2206
2207/*
2208 * Returns < 0 on error, 0 when more leaves are to be scanned.
2209 * Returns 1 when done.
2210 */
2211static int
2212qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2213		   struct btrfs_trans_handle *trans)
2214{
2215	struct btrfs_key found;
2216	struct extent_buffer *scratch_leaf = NULL;
2217	struct ulist *roots = NULL;
2218	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2219	u64 num_bytes;
2220	int slot;
2221	int ret;
2222
2223	mutex_lock(&fs_info->qgroup_rescan_lock);
2224	ret = btrfs_search_slot_for_read(fs_info->extent_root,
2225					 &fs_info->qgroup_rescan_progress,
2226					 path, 1, 0);
2227
2228	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
2229		 fs_info->qgroup_rescan_progress.objectid,
2230		 fs_info->qgroup_rescan_progress.type,
2231		 fs_info->qgroup_rescan_progress.offset, ret);
2232
2233	if (ret) {
2234		/*
2235		 * The rescan is about to end, we will not be scanning any
2236		 * further blocks. We cannot unset the RESCAN flag here, because
2237		 * we want to commit the transaction if everything went well.
2238		 * To make the live accounting work in this phase, we set our
2239		 * scan progress pointer such that every real extent objectid
2240		 * will be smaller.
2241		 */
2242		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2243		btrfs_release_path(path);
2244		mutex_unlock(&fs_info->qgroup_rescan_lock);
2245		return ret;
2246	}
2247
2248	btrfs_item_key_to_cpu(path->nodes[0], &found,
2249			      btrfs_header_nritems(path->nodes[0]) - 1);
2250	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2251
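	/*
	 * Take a tree mod seq so that backref walking sees a consistent view,
	 * then work on a private clone of the leaf so the path and the rescan
	 * lock can be dropped while each extent item is accounted below.
	 */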
2252	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2253	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2254	if (!scratch_leaf) {
2255		ret = -ENOMEM;
2256		mutex_unlock(&fs_info->qgroup_rescan_lock);
2257		goto out;
2258	}
2259	extent_buffer_get(scratch_leaf);
2260	btrfs_tree_read_lock(scratch_leaf);
2261	btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2262	slot = path->slots[0];
2263	btrfs_release_path(path);
2264	mutex_unlock(&fs_info->qgroup_rescan_lock);
2265
2266	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2267		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2268		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2269		    found.type != BTRFS_METADATA_ITEM_KEY)
2270			continue;
2271		if (found.type == BTRFS_METADATA_ITEM_KEY)
2272			num_bytes = fs_info->extent_root->nodesize;
2273		else
2274			num_bytes = found.offset;
2275
2276		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2277					   &roots);
2278		if (ret < 0)
2279			goto out;
2280		/* For rescan, just pass old_roots as NULL */
2281		ret = btrfs_qgroup_account_extent(trans, fs_info,
2282				found.objectid, num_bytes, NULL, roots);
2283		if (ret < 0)
2284			goto out;
2285	}
2286out:
2287	if (scratch_leaf) {
2288		btrfs_tree_read_unlock_blocking(scratch_leaf);
2289		free_extent_buffer(scratch_leaf);
2290	}
2291	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2292
2293	return ret;
2294}
2295
2296static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2297{
2298	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2299						     qgroup_rescan_work);
2300	struct btrfs_path *path;
2301	struct btrfs_trans_handle *trans = NULL;
2302	int err = -ENOMEM;
2303	int ret = 0;
2304
2305	path = btrfs_alloc_path();
2306	if (!path)
2307		goto out;
2308
2309	err = 0;
2310	while (!err && !btrfs_fs_closing(fs_info)) {
2311		trans = btrfs_start_transaction(fs_info->fs_root, 0);
2312		if (IS_ERR(trans)) {
2313			err = PTR_ERR(trans);
2314			break;
2315		}
2316		if (!fs_info->quota_enabled) {
2317			err = -EINTR;
2318		} else {
2319			err = qgroup_rescan_leaf(fs_info, path, trans);
2320		}
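		/*
		 * qgroup_rescan_leaf() returns 1 once the whole extent tree
		 * has been scanned; commit in that case so the final numbers
		 * reach disk, otherwise just end the transaction and move on
		 * to the next leaf.
		 */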
2321		if (err > 0)
2322			btrfs_commit_transaction(trans, fs_info->fs_root);
2323		else
2324			btrfs_end_transaction(trans, fs_info->fs_root);
2325	}
2326
2327out:
2328	btrfs_free_path(path);
2329
2330	mutex_lock(&fs_info->qgroup_rescan_lock);
2331	if (!btrfs_fs_closing(fs_info))
2332		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2333
2334	if (err > 0 &&
2335	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2336		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2337	} else if (err < 0) {
2338		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2339	}
2340	mutex_unlock(&fs_info->qgroup_rescan_lock);
2341
2342	/*
2343	 * Only update the status item, since the previous part has already
2344	 * updated the qgroup info.
2345	 */
2346	trans = btrfs_start_transaction(fs_info->quota_root, 1);
2347	if (IS_ERR(trans)) {
2348		err = PTR_ERR(trans);
2349		btrfs_err(fs_info,
2350			  "failed to start transaction for status update: %d",
2351			  err);
2352		goto done;
2353	}
2354	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2355	if (ret < 0) {
2356		err = ret;
2357		btrfs_err(fs_info, "failed to update qgroup status: %d", err);
2358	}
2359	btrfs_end_transaction(trans, fs_info->quota_root);
2360
2361	if (btrfs_fs_closing(fs_info)) {
2362		btrfs_info(fs_info, "qgroup scan paused");
2363	} else if (err >= 0) {
2364		btrfs_info(fs_info, "qgroup scan completed%s",
2365			err > 0 ? " (inconsistency flag cleared)" : "");
2366	} else {
2367		btrfs_err(fs_info, "qgroup scan failed with %d", err);
2368	}
2369
2370done:
2371	complete_all(&fs_info->qgroup_rescan_completion);
2372}
2373
2374/*
2375 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2376 * memory required for the rescan context.
2377 */
2378static int
2379qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2380		   int init_flags)
2381{
2382	int ret = 0;
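	/*
	 * A zero @init_flags means we are resuming a rescan that is already
	 * recorded in the qgroup status item, so the RESCAN and ON flags must
	 * already be set; a non-zero value means a new rescan is being
	 * started and the RESCAN flag is set below.
	 */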
2383
2384	if (!init_flags &&
2385	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2386	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2387		ret = -EINVAL;
2388		goto err;
2389	}
2390
2391	mutex_lock(&fs_info->qgroup_rescan_lock);
2392	spin_lock(&fs_info->qgroup_lock);
2393
2394	if (init_flags) {
2395		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2396			ret = -EINPROGRESS;
2397		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2398			ret = -EINVAL;
2399
2400		if (ret) {
2401			spin_unlock(&fs_info->qgroup_lock);
2402			mutex_unlock(&fs_info->qgroup_rescan_lock);
2403			goto err;
2404		}
2405		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2406	}
2407
2408	memset(&fs_info->qgroup_rescan_progress, 0,
2409		sizeof(fs_info->qgroup_rescan_progress));
2410	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2411	init_completion(&fs_info->qgroup_rescan_completion);
2412
2413	spin_unlock(&fs_info->qgroup_lock);
2414	mutex_unlock(&fs_info->qgroup_rescan_lock);
2415
2416	memset(&fs_info->qgroup_rescan_work, 0,
2417	       sizeof(fs_info->qgroup_rescan_work));
2418	btrfs_init_work(&fs_info->qgroup_rescan_work,
2419			btrfs_qgroup_rescan_helper,
2420			btrfs_qgroup_rescan_worker, NULL, NULL);
2421
2422	if (ret) {
2423err:
2424		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2425		return ret;
2426	}
2427
2428	return 0;
2429}
2430
2431static void
2432qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2433{
2434	struct rb_node *n;
2435	struct btrfs_qgroup *qgroup;
2436
2437	spin_lock(&fs_info->qgroup_lock);
2438	/* clear all current qgroup tracking information */
2439	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2440		qgroup = rb_entry(n, struct btrfs_qgroup, node);
2441		qgroup->rfer = 0;
2442		qgroup->rfer_cmpr = 0;
2443		qgroup->excl = 0;
2444		qgroup->excl_cmpr = 0;
2445	}
2446	spin_unlock(&fs_info->qgroup_lock);
2447}
2448
2449int
2450btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2451{
2452	int ret = 0;
2453	struct btrfs_trans_handle *trans;
2454
2455	ret = qgroup_rescan_init(fs_info, 0, 1);
2456	if (ret)
2457		return ret;
2458
2459	/*
2460	 * We have set the rescan_progress to 0, which means no more
2461	 * delayed refs will be accounted by the qgroup accounting code.
2462	 * However, an accounting pass may already be past its call
2463	 * to btrfs_find_all_roots, in which case it would still do the
2464	 * accounting.
2465	 * To solve this, we're committing the transaction, which will
2466	 * ensure we run all delayed refs and only after that, we are
2467	 * going to clear all tracking information for a clean start.
2468	 */
2469
2470	trans = btrfs_join_transaction(fs_info->fs_root);
2471	if (IS_ERR(trans)) {
2472		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2473		return PTR_ERR(trans);
2474	}
2475	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2476	if (ret) {
2477		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2478		return ret;
2479	}
2480
2481	qgroup_rescan_zero_tracking(fs_info);
2482
2483	btrfs_queue_work(fs_info->qgroup_rescan_workers,
2484			 &fs_info->qgroup_rescan_work);
2485
2486	return 0;
2487}
2488
2489int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
2490{
2491	int running;
2492	int ret = 0;
2493
2494	mutex_lock(&fs_info->qgroup_rescan_lock);
2495	spin_lock(&fs_info->qgroup_lock);
2496	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2497	spin_unlock(&fs_info->qgroup_lock);
2498	mutex_unlock(&fs_info->qgroup_rescan_lock);
2499
2500	if (running)
2501		ret = wait_for_completion_interruptible(
2502					&fs_info->qgroup_rescan_completion);
2503
2504	return ret;
2505}
2506
2507/*
2508 * this is only called from open_ctree where we're still single threaded, thus
2509 * locking is omitted here.
2510 */
2511void
2512btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2513{
2514	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2515		btrfs_queue_work(fs_info->qgroup_rescan_workers,
2516				 &fs_info->qgroup_rescan_work);
2517}
2518
2519/*
2520 * Reserve qgroup space for range [start, start + len).
2521 *
2522 * This function will either reserve space from related qgroups or do
2523 * nothing if the range is already reserved.
2524 *
2525 * Return 0 for a successful reservation
2526 * Return <0 for error (including -EDQUOT)
2527 *
2528 * NOTE: this function may sleep for memory allocation.
2529 */
2530int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2531{
2532	struct btrfs_root *root = BTRFS_I(inode)->root;
2533	struct extent_changeset changeset;
2534	struct ulist_node *unode;
2535	struct ulist_iterator uiter;
2536	int ret;
2537
2538	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
2539	    len == 0)
2540		return 0;
2541
2542	changeset.bytes_changed = 0;
2543	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (!changeset.range_changed)
		return -ENOMEM;
2544	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2545			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
2546			&changeset);
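	/*
	 * Only bytes that were not already flagged EXTENT_QGROUP_RESERVED are
	 * counted in changeset.bytes_changed, so re-reserving an overlapping
	 * range does not charge the qgroups twice.
	 */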
2547	trace_btrfs_qgroup_reserve_data(inode, start, len,
2548					changeset.bytes_changed,
2549					QGROUP_RESERVE);
2550	if (ret < 0)
2551		goto cleanup;
2552	ret = qgroup_reserve(root, changeset.bytes_changed);
2553	if (ret < 0)
2554		goto cleanup;
2555
2556	ulist_free(changeset.range_changed);
2557	return ret;
2558
2559cleanup:
2560	/* cleanup already reserved ranges */
2561	ULIST_ITER_INIT(&uiter);
2562	while ((unode = ulist_next(changeset.range_changed, &uiter)))
2563		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2564				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
2565				 GFP_NOFS);
2566	ulist_free(changeset.range_changed);
2567	return ret;
2568}
2569
2570static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2571				       int free)
2572{
2573	struct extent_changeset changeset;
2574	int trace_op = QGROUP_RELEASE;
2575	int ret;
2576
2577	changeset.bytes_changed = 0;
2578	changeset.range_changed = ulist_alloc(GFP_NOFS);
2579	if (!changeset.range_changed)
2580		return -ENOMEM;
2581
2582	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2583			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
2584			&changeset);
2585	if (ret < 0)
2586		goto out;
2587
2588	if (free) {
2589		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2590		trace_op = QGROUP_FREE;
2591	}
2592	trace_btrfs_qgroup_release_data(inode, start, len,
2593					changeset.bytes_changed, trace_op);
2594out:
2595	ulist_free(changeset.range_changed);
2596	return ret;
2597}
2598
2599/*
2600 * Free a reserved space range from io_tree and related qgroups
2601 *
2602 * Should be called when a range of pages gets invalidated before reaching
2603 * disk, or for error cleanup.
2604 *
2605 * For data written to disk, use btrfs_qgroup_release_data().
2606 *
2607 * NOTE: This function may sleep for memory allocation.
2608 */
2609int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
2610{
2611	return __btrfs_qgroup_release_data(inode, start, len, 1);
2612}
2613
2614/*
2615 * Release a reserved space range from io_tree only.
2616 *
2617 * Should be called when a range of pages gets written to disk and the
2618 * corresponding FILE_EXTENT item is inserted into the corresponding root.
2619 *
2620 * Since the new qgroup accounting framework only updates qgroup numbers at
2621 * commit_transaction() time, its reserved space shouldn't be freed from the
2622 * related qgroups.
2623 *
2624 * But we should release the range from the io_tree, to allow further writes
2625 * to be COWed.
2626 *
2627 * NOTE: This function may sleep for memory allocation.
2628 */
2629int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
2630{
2631	return __btrfs_qgroup_release_data(inode, start, len, 0);
2632}
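
/*
 * Typical life cycle of a data range's qgroup reservation (a sketch, not an
 * exhaustive list of call sites):
 *
 *   btrfs_qgroup_reserve_data()   before dirtying the range
 *   btrfs_qgroup_release_data()   range written to disk; the qgroup numbers
 *                                 themselves are updated at commit time
 *   btrfs_qgroup_free_data()      range invalidated or the write failed, so
 *                                 hand the reservation back immediately
 */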
2633
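/*
 * Metadata reservations are tracked per root in root->qgroup_meta_rsv and
 * charged against the root's own qgroup (and its ancestors) through
 * qgroup_reserve().  num_bytes must be a multiple of the nodesize;
 * btrfs_qgroup_free_meta_all() returns whatever is still outstanding.
 */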
2634int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
2635{
2636	int ret;
2637
2638	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
2639	    num_bytes == 0)
2640		return 0;
2641
2642	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2643	ret = qgroup_reserve(root, num_bytes);
2644	if (ret < 0)
2645		return ret;
2646	atomic_add(num_bytes, &root->qgroup_meta_rsv);
2647	return ret;
2648}
2649
2650void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2651{
2652	int reserved;
2653
2654	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
2655		return;
2656
2657	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
2658	if (reserved == 0)
2659		return;
2660	qgroup_free(root, reserved);
2661}
2662
2663void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2664{
2665	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
2666		return;
2667
2668	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
2669	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
2670	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
2671	qgroup_free(root, num_bytes);
2672}
2673
2674/*
2675 * Check for leaked qgroup reserved space, normally at inode destruction
2676 * time.
2677 */
2678void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2679{
2680	struct extent_changeset changeset;
2681	struct ulist_node *unode;
2682	struct ulist_iterator iter;
2683	int ret;
2684
2685	changeset.bytes_changed = 0;
2686	changeset.range_changed = ulist_alloc(GFP_NOFS);
2687	if (WARN_ON(!changeset.range_changed))
2688		return;
2689
2690	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
2691			EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
2692
2693	WARN_ON(ret < 0);
2694	if (WARN_ON(changeset.bytes_changed)) {
2695		ULIST_ITER_INIT(&iter);
2696		while ((unode = ulist_next(changeset.range_changed, &iter))) {
2697			btrfs_warn(BTRFS_I(inode)->root->fs_info,
2698				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
2699				inode->i_ino, unode->val, unode->aux);
2700		}
2701		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
2702	}
2703	ulist_free(changeset.range_changed);
2704}