fs/gfs2/quota.c (Linux v3.15)
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
   17 * multiple nodes to overrun their quota, but that overrun is controllable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
   21 * There are a couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
   24 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
   29 * (with an infinite number of nodes, each with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
  38
  39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43#include <linux/mm.h>
  44#include <linux/spinlock.h>
  45#include <linux/completion.h>
  46#include <linux/buffer_head.h>
  47#include <linux/sort.h>
  48#include <linux/fs.h>
  49#include <linux/bio.h>
  50#include <linux/gfs2_ondisk.h>
  51#include <linux/kthread.h>
  52#include <linux/freezer.h>
  53#include <linux/quota.h>
  54#include <linux/dqblk_xfs.h>
  55#include <linux/lockref.h>
  56#include <linux/list_lru.h>
  57#include <linux/rcupdate.h>
  58#include <linux/rculist_bl.h>
  59#include <linux/bit_spinlock.h>
  60#include <linux/jhash.h>
  61#include <linux/vmalloc.h>
  62
  63#include "gfs2.h"
  64#include "incore.h"
  65#include "bmap.h"
  66#include "glock.h"
  67#include "glops.h"
  68#include "log.h"
  69#include "meta_io.h"
  70#include "quota.h"
  71#include "rgrp.h"
  72#include "super.h"
  73#include "trans.h"
  74#include "inode.h"
  75#include "util.h"
  76
  77#define GFS2_QD_HASH_SHIFT      12
  78#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
  79#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  80
  81/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  82/*                     -> sd_bitmap_lock                              */
  83static DEFINE_SPINLOCK(qd_lock);
  84struct list_lru gfs2_qd_lru;
  85
  86static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  87
  88static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  89				 const struct kqid qid)
  90{
  91	unsigned int h;
  92
  93	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  94	h = jhash(&qid, sizeof(struct kqid), h);
  95
  96	return h & GFS2_QD_HASH_MASK;
  97}
  98
  99static inline void spin_lock_bucket(unsigned int hash)
 100{
 101        hlist_bl_lock(&qd_hash_table[hash]);
 102}
 103
 104static inline void spin_unlock_bucket(unsigned int hash)
 105{
 106        hlist_bl_unlock(&qd_hash_table[hash]);
 107}
 108
 109static void gfs2_qd_dealloc(struct rcu_head *rcu)
 110{
 111	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 112	kmem_cache_free(gfs2_quotad_cachep, qd);
 113}
 114
 115static void gfs2_qd_dispose(struct list_head *list)
 116{
 117	struct gfs2_quota_data *qd;
 118	struct gfs2_sbd *sdp;
 119
 120	while (!list_empty(list)) {
 121		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
 122		sdp = qd->qd_gl->gl_sbd;
 123
 124		list_del(&qd->qd_lru);
 125
 126		/* Free from the filesystem-specific list */
 127		spin_lock(&qd_lock);
 128		list_del(&qd->qd_list);
 129		spin_unlock(&qd_lock);
 130
 131		spin_lock_bucket(qd->qd_hash);
 132		hlist_bl_del_rcu(&qd->qd_hlist);
 133		spin_unlock_bucket(qd->qd_hash);
 134
 135		gfs2_assert_warn(sdp, !qd->qd_change);
 136		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 137		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 138
 139		gfs2_glock_put(qd->qd_gl);
 140		atomic_dec(&sdp->sd_quota_count);
 141
 142		/* Delete it from the common reclaim list */
 143		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 144	}
 145}
 146
 147
 148static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
 149{
 150	struct list_head *dispose = arg;
 151	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 152
 153	if (!spin_trylock(&qd->qd_lockref.lock))
 154		return LRU_SKIP;
 155
 156	if (qd->qd_lockref.count == 0) {
 157		lockref_mark_dead(&qd->qd_lockref);
 158		list_move(&qd->qd_lru, dispose);
 159	}
 160
 161	spin_unlock(&qd->qd_lockref.lock);
 162	return LRU_REMOVED;
 163}
 164
 165static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 166					 struct shrink_control *sc)
 167{
 168	LIST_HEAD(dispose);
 169	unsigned long freed;
 170
 171	if (!(sc->gfp_mask & __GFP_FS))
 172		return SHRINK_STOP;
 173
 174	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
 175				   &dispose, &sc->nr_to_scan);
 176
 177	gfs2_qd_dispose(&dispose);
 178
 179	return freed;
 180}
 181
 182static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 183					  struct shrink_control *sc)
 184{
 185	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
 186}
 187
 188struct shrinker gfs2_qd_shrinker = {
 189	.count_objects = gfs2_qd_shrink_count,
 190	.scan_objects = gfs2_qd_shrink_scan,
 191	.seeks = DEFAULT_SEEKS,
 192	.flags = SHRINKER_NUMA_AWARE,
 193};
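The shrinker above is only declared in this file; its registration lives elsewhere in the GFS2 initialisation code. Below is a minimal sketch, under assumptions, of how a NUMA-aware shrinker and its list_lru are typically wired up at module init. Only gfs2_qd_lru, gfs2_qd_shrinker and the list_lru/shrinker APIs come from the code above; the setup function name is illustrative.

	/* Sketch: assumed one-time setup, e.g. from the filesystem's module init. */
	static int __init example_qd_shrinker_setup(void)
	{
		int error;

		/* The LRU that gfs2_qd_shrink_scan()/gfs2_qd_shrink_count() walk above. */
		error = list_lru_init(&gfs2_qd_lru);
		if (error)
			return error;

		error = register_shrinker(&gfs2_qd_shrinker);
		if (error)
			list_lru_destroy(&gfs2_qd_lru);
		return error;
	}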
 194
 195
 196static u64 qd2index(struct gfs2_quota_data *qd)
 197{
 198	struct kqid qid = qd->qd_id;
 199	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 200		((qid.type == USRQUOTA) ? 0 : 1);
 201}
 202
 203static u64 qd2offset(struct gfs2_quota_data *qd)
 204{
 205	u64 offset;
 206
 207	offset = qd2index(qd);
 208	offset *= sizeof(struct gfs2_quota);
 209
 210	return offset;
 211}
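qd2index() and qd2offset() above encode the layout of the quota file: records for user and group IDs are interleaved, with even indices for user quotas and odd indices for group quotas, and each record is one struct gfs2_quota. A stand-alone sketch of the same arithmetic (the example_* name is illustrative, not part of this file):

	/* Sketch: the user quota for ID n sits at index 2n, the group quota at 2n + 1. */
	static u64 example_quota_file_offset(u32 id, int is_group)
	{
		u64 index = 2 * (u64)id + (is_group ? 1 : 0);

		/* e.g. UID 1000 -> index 2000, GID 1000 -> index 2001 */
		return index * sizeof(struct gfs2_quota);
	}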
 212
 213static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 214{
 215	struct gfs2_quota_data *qd;
 216	int error;
 217
 218	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 219	if (!qd)
 220		return NULL;
 221
 222	qd->qd_sbd = sdp;
 223	qd->qd_lockref.count = 1;
 224	spin_lock_init(&qd->qd_lockref.lock);
 225	qd->qd_id = qid;
 226	qd->qd_slot = -1;
 227	INIT_LIST_HEAD(&qd->qd_lru);
 228	qd->qd_hash = hash;
 229
 230	error = gfs2_glock_get(sdp, qd2index(qd),
 231			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 232	if (error)
 233		goto fail;
 234
 235	return qd;
 236
 237fail:
 238	kmem_cache_free(gfs2_quotad_cachep, qd);
 239	return NULL;
 240}
 241
 242static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 243						     const struct gfs2_sbd *sdp,
 244						     struct kqid qid)
 245{
 246	struct gfs2_quota_data *qd;
 247	struct hlist_bl_node *h;
 248
 249	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 250		if (!qid_eq(qd->qd_id, qid))
 251			continue;
 252		if (qd->qd_sbd != sdp)
 253			continue;
 254		if (lockref_get_not_dead(&qd->qd_lockref)) {
 255			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 256			return qd;
 257		}
 258	}
 259
 260	return NULL;
 261}
 262
 263
 264static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 265		  struct gfs2_quota_data **qdp)
 266{
 267	struct gfs2_quota_data *qd, *new_qd;
 268	unsigned int hash = gfs2_qd_hash(sdp, qid);
 269
 270	rcu_read_lock();
 271	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 272	rcu_read_unlock();
 273
 274	if (qd)
 275		return 0;
 276
 277	new_qd = qd_alloc(hash, sdp, qid);
 278	if (!new_qd)
 279		return -ENOMEM;
 280
 281	spin_lock(&qd_lock);
 282	spin_lock_bucket(hash);
 283	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 284	if (qd == NULL) {
 285		*qdp = new_qd;
 286		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 287		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 288		atomic_inc(&sdp->sd_quota_count);
 289	}
 290	spin_unlock_bucket(hash);
 291	spin_unlock(&qd_lock);
 292
 293	if (qd) {
 294		gfs2_glock_put(new_qd->qd_gl);
 295		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 296	}
 297
 298	return 0;
 299}
 300
 301
 302static void qd_hold(struct gfs2_quota_data *qd)
 303{
 304	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 305	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 306	lockref_get(&qd->qd_lockref);
 307}
 308
 309static void qd_put(struct gfs2_quota_data *qd)
 310{
 311	if (lockref_put_or_lock(&qd->qd_lockref))
 312		return;
 313
 314	qd->qd_lockref.count = 0;
 315	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 316	spin_unlock(&qd->qd_lockref.lock);
 317
 318}
 319
 320static int slot_get(struct gfs2_quota_data *qd)
 321{
 322	struct gfs2_sbd *sdp = qd->qd_sbd;
 323	unsigned int bit;
 324	int error = 0;
 325
 326	spin_lock(&sdp->sd_bitmap_lock);
 327	if (qd->qd_slot_count != 0)
 328		goto out;
 329
 330	error = -ENOSPC;
 331	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 332	if (bit < sdp->sd_quota_slots) {
 333		set_bit(bit, sdp->sd_quota_bitmap);
 334		qd->qd_slot = bit;
 335		error = 0;
 336out:
 337		qd->qd_slot_count++;
 338	}
 339	spin_unlock(&sdp->sd_bitmap_lock);
 340
 341	return error;
 342}
 343
 344static void slot_hold(struct gfs2_quota_data *qd)
 345{
 346	struct gfs2_sbd *sdp = qd->qd_sbd;
 347
 348	spin_lock(&sdp->sd_bitmap_lock);
 349	gfs2_assert(sdp, qd->qd_slot_count);
 350	qd->qd_slot_count++;
 351	spin_unlock(&sdp->sd_bitmap_lock);
 352}
 353
 354static void slot_put(struct gfs2_quota_data *qd)
 355{
 356	struct gfs2_sbd *sdp = qd->qd_sbd;
 357
 358	spin_lock(&sdp->sd_bitmap_lock);
 359	gfs2_assert(sdp, qd->qd_slot_count);
 360	if (!--qd->qd_slot_count) {
 361		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 362		qd->qd_slot = -1;
 363	}
 364	spin_unlock(&sdp->sd_bitmap_lock);
 365}
 366
 367static int bh_get(struct gfs2_quota_data *qd)
 368{
 369	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 370	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 371	unsigned int block, offset;
 372	struct buffer_head *bh;
 373	int error;
 374	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 375
 376	mutex_lock(&sdp->sd_quota_mutex);
 377
 378	if (qd->qd_bh_count++) {
 379		mutex_unlock(&sdp->sd_quota_mutex);
 380		return 0;
 381	}
 382
 383	block = qd->qd_slot / sdp->sd_qc_per_block;
 384	offset = qd->qd_slot % sdp->sd_qc_per_block;
 385
 386	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 387	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 388	if (error)
 389		goto fail;
 390	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
 391	if (error)
 392		goto fail;
 393	error = -EIO;
 394	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 395		goto fail_brelse;
 396
 397	qd->qd_bh = bh;
 398	qd->qd_bh_qc = (struct gfs2_quota_change *)
 399		(bh->b_data + sizeof(struct gfs2_meta_header) +
 400		 offset * sizeof(struct gfs2_quota_change));
 401
 402	mutex_unlock(&sdp->sd_quota_mutex);
 403
 404	return 0;
 405
 406fail_brelse:
 407	brelse(bh);
 408fail:
 409	qd->qd_bh_count--;
 410	mutex_unlock(&sdp->sd_quota_mutex);
 411	return error;
 412}
 413
 414static void bh_put(struct gfs2_quota_data *qd)
 415{
 416	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 417
 418	mutex_lock(&sdp->sd_quota_mutex);
 419	gfs2_assert(sdp, qd->qd_bh_count);
 420	if (!--qd->qd_bh_count) {
 421		brelse(qd->qd_bh);
 422		qd->qd_bh = NULL;
 423		qd->qd_bh_qc = NULL;
 424	}
 425	mutex_unlock(&sdp->sd_quota_mutex);
 426}
 427
 428static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 429			 u64 *sync_gen)
 430{
 431	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 432	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 433	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 434		return 0;
 435
 436	if (!lockref_get_not_dead(&qd->qd_lockref))
 437		return 0;
 438
 439	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 440	set_bit(QDF_LOCKED, &qd->qd_flags);
 441	qd->qd_change_sync = qd->qd_change;
 442	slot_hold(qd);
 443	return 1;
 444}
 445
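/*
 * qd_fish - pick one quota entry that still needs syncing
 * (editorial summary; this comment is not part of the original file)
 *
 * Walks the per-superblock quota list for an entry with pending changes that
 * has not yet been written in the current sync generation (see qd_check_sync()
 * above).  A matching entry is marked QDF_LOCKED, pinned (reference, slot and
 * quota-change buffer) and returned in *qdp for do_sync(); *qdp is left NULL
 * when nothing needs syncing.
 */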
 446static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 447{
 448	struct gfs2_quota_data *qd = NULL;
 449	int error;
 450	int found = 0;
 451
 452	*qdp = NULL;
 453
 454	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 455		return 0;
 456
 457	spin_lock(&qd_lock);
 458
 459	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 460		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
 461		if (found)
 462			break;
 463	}
 464
 465	if (!found)
 466		qd = NULL;
 467
 468	spin_unlock(&qd_lock);
 469
 470	if (qd) {
 471		gfs2_assert_warn(sdp, qd->qd_change_sync);
 472		error = bh_get(qd);
 473		if (error) {
 474			clear_bit(QDF_LOCKED, &qd->qd_flags);
 475			slot_put(qd);
 476			qd_put(qd);
 477			return error;
 478		}
 479	}
 480
 481	*qdp = qd;
 482
 483	return 0;
 484}
 485
 486static void qd_unlock(struct gfs2_quota_data *qd)
 487{
 488	gfs2_assert_warn(qd->qd_gl->gl_sbd,
 489			 test_bit(QDF_LOCKED, &qd->qd_flags));
 490	clear_bit(QDF_LOCKED, &qd->qd_flags);
 491	bh_put(qd);
 492	slot_put(qd);
 493	qd_put(qd);
 494}
 495
 496static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 497		    struct gfs2_quota_data **qdp)
 498{
 499	int error;
 500
 501	error = qd_get(sdp, qid, qdp);
 502	if (error)
 503		return error;
 504
 505	error = slot_get(*qdp);
 506	if (error)
 507		goto fail;
 508
 509	error = bh_get(*qdp);
 510	if (error)
 511		goto fail_slot;
 512
 513	return 0;
 514
 515fail_slot:
 516	slot_put(*qdp);
 517fail:
 518	qd_put(*qdp);
 519	return error;
 520}
 521
 522static void qdsb_put(struct gfs2_quota_data *qd)
 523{
 524	bh_put(qd);
 525	slot_put(qd);
 526	qd_put(qd);
 527}
 528
 529int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 530{
 531	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 532	struct gfs2_quota_data **qd;
 533	int error;
 534
 535	if (ip->i_res == NULL) {
 536		error = gfs2_rs_alloc(ip);
 537		if (error)
 538			return error;
 539	}
 540
 541	qd = ip->i_res->rs_qa_qd;
 542
 543	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
 544	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 545		return -EIO;
 546
 547	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 548		return 0;
 549
 550	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 551	if (error)
 552		goto out;
 553	ip->i_res->rs_qa_qd_num++;
 554	qd++;
 555
 556	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 557	if (error)
 558		goto out;
 559	ip->i_res->rs_qa_qd_num++;
 560	qd++;
 561
 562	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 563	    !uid_eq(uid, ip->i_inode.i_uid)) {
 564		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 565		if (error)
 566			goto out;
 567		ip->i_res->rs_qa_qd_num++;
 568		qd++;
 569	}
 570
 571	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 572	    !gid_eq(gid, ip->i_inode.i_gid)) {
 573		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 574		if (error)
 575			goto out;
 576		ip->i_res->rs_qa_qd_num++;
 577		qd++;
 578	}
 579
 580out:
 581	if (error)
 582		gfs2_quota_unhold(ip);
 583	return error;
 584}
 585
 586void gfs2_quota_unhold(struct gfs2_inode *ip)
 587{
 588	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 589	unsigned int x;
 590
 591	if (ip->i_res == NULL)
 592		return;
 593	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 594
 595	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
 596		qdsb_put(ip->i_res->rs_qa_qd[x]);
 597		ip->i_res->rs_qa_qd[x] = NULL;
 598	}
 599	ip->i_res->rs_qa_qd_num = 0;
 600}
 601
 602static int sort_qd(const void *a, const void *b)
 603{
 604	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 605	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 606
 607	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 608		return -1;
 609	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 610		return 1;
 611	return 0;
 612}
 613
 614static void do_qc(struct gfs2_quota_data *qd, s64 change)
 615{
 616	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 617	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 618	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 619	s64 x;
 620
 621	mutex_lock(&sdp->sd_quota_mutex);
 622	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 623
 624	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 625		qc->qc_change = 0;
 626		qc->qc_flags = 0;
 627		if (qd->qd_id.type == USRQUOTA)
 628			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 629		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 630	}
 631
 632	x = be64_to_cpu(qc->qc_change) + change;
 633	qc->qc_change = cpu_to_be64(x);
 634
 635	spin_lock(&qd_lock);
 636	qd->qd_change = x;
 637	spin_unlock(&qd_lock);
 638
 639	if (!x) {
 640		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 641		clear_bit(QDF_CHANGE, &qd->qd_flags);
 642		qc->qc_flags = 0;
 643		qc->qc_id = 0;
 644		slot_put(qd);
 645		qd_put(qd);
 646	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 647		qd_hold(qd);
 648		slot_hold(qd);
 649	}
 650
 651	mutex_unlock(&sdp->sd_quota_mutex);
 652}
 653
 654/**
 655 * gfs2_adjust_quota - adjust record of current block usage
 656 * @ip: The quota inode
 657 * @loc: Offset of the entry in the quota file
 658 * @change: The amount of usage change to record
 659 * @qd: The quota data
 660 * @fdq: The updated limits to record
 661 *
  662 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 663 * in turn mostly borrowed from ext3
 664 *
 665 * Returns: 0 or -ve on error
 666 */
 667
 668static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 669			     s64 change, struct gfs2_quota_data *qd,
 670			     struct fs_disk_quota *fdq)
 671{
 672	struct inode *inode = &ip->i_inode;
 673	struct gfs2_sbd *sdp = GFS2_SB(inode);
 674	struct address_space *mapping = inode->i_mapping;
 675	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 676	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 677	unsigned blocksize, iblock, pos;
 678	struct buffer_head *bh;
 679	struct page *page;
 680	void *kaddr, *ptr;
 681	struct gfs2_quota q;
 682	int err, nbytes;
 683	u64 size;
 684
 685	if (gfs2_is_stuffed(ip)) {
 686		err = gfs2_unstuff_dinode(ip, NULL);
 687		if (err)
 688			return err;
 689	}
 690
 691	memset(&q, 0, sizeof(struct gfs2_quota));
 692	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 693	if (err < 0)
 694		return err;
 695
 696	err = -EIO;
 697	be64_add_cpu(&q.qu_value, change);
 698	qd->qd_qb.qb_value = q.qu_value;
 699	if (fdq) {
 700		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
 701			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
 702			qd->qd_qb.qb_warn = q.qu_warn;
 703		}
 704		if (fdq->d_fieldmask & FS_DQ_BHARD) {
 705			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
 706			qd->qd_qb.qb_limit = q.qu_limit;
 707		}
 708		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
 709			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
 710			qd->qd_qb.qb_value = q.qu_value;
 711		}
 712	}
 713
 714	/* Write the quota into the quota file on disk */
 715	ptr = &q;
 716	nbytes = sizeof(struct gfs2_quota);
 717get_a_page:
 718	page = find_or_create_page(mapping, index, GFP_NOFS);
 719	if (!page)
 720		return -ENOMEM;
 721
 722	blocksize = inode->i_sb->s_blocksize;
 723	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 724
 725	if (!page_has_buffers(page))
 726		create_empty_buffers(page, blocksize, 0);
 727
 728	bh = page_buffers(page);
 729	pos = blocksize;
 730	while (offset >= pos) {
 731		bh = bh->b_this_page;
 732		iblock++;
 733		pos += blocksize;
 734	}
 735
 736	if (!buffer_mapped(bh)) {
 737		gfs2_block_map(inode, iblock, bh, 1);
 738		if (!buffer_mapped(bh))
 739			goto unlock_out;
 740		/* If it's a newly allocated disk block for quota, zero it */
 741		if (buffer_new(bh))
 742			zero_user(page, pos - blocksize, bh->b_size);
 743	}
 744
 745	if (PageUptodate(page))
 746		set_buffer_uptodate(bh);
 747
 748	if (!buffer_uptodate(bh)) {
 749		ll_rw_block(READ | REQ_META, 1, &bh);
 750		wait_on_buffer(bh);
 751		if (!buffer_uptodate(bh))
 752			goto unlock_out;
 753	}
 754
 755	gfs2_trans_add_data(ip->i_gl, bh);
 756
 757	kaddr = kmap_atomic(page);
 758	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 759		nbytes = PAGE_CACHE_SIZE - offset;
 760	memcpy(kaddr + offset, ptr, nbytes);
 761	flush_dcache_page(page);
 762	kunmap_atomic(kaddr);
 763	unlock_page(page);
 764	page_cache_release(page);
 765
 766	/* If quota straddles page boundary, we need to update the rest of the
 767	 * quota at the beginning of the next page */
 768	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
 769		ptr = ptr + nbytes;
 770		nbytes = sizeof(struct gfs2_quota) - nbytes;
 771		offset = 0;
 772		index++;
 773		goto get_a_page;
 774	}
 775
 776	size = loc + sizeof(struct gfs2_quota);
 777	if (size > inode->i_size)
 778		i_size_write(inode, size);
 779	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 780	mark_inode_dirty(inode);
 781	return 0;
 782
 783unlock_out:
 784	unlock_page(page);
 785	page_cache_release(page);
 786	return err;
 787}
 788
 789static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 790{
 791	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 792	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 793	struct gfs2_alloc_parms ap = { .aflags = 0, };
 794	unsigned int data_blocks, ind_blocks;
 795	struct gfs2_holder *ghs, i_gh;
 796	unsigned int qx, x;
 797	struct gfs2_quota_data *qd;
 798	unsigned reserved;
 799	loff_t offset;
 800	unsigned int nalloc = 0, blocks;
 801	int error;
 802
 803	error = gfs2_rs_alloc(ip);
 804	if (error)
 805		return error;
 806
 807	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 808			      &data_blocks, &ind_blocks);
 809
 810	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 811	if (!ghs)
 812		return -ENOMEM;
 813
 814	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 815	mutex_lock(&ip->i_inode.i_mutex);
 816	for (qx = 0; qx < num_qd; qx++) {
 817		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 818					   GL_NOCACHE, &ghs[qx]);
 819		if (error)
 820			goto out;
 821	}
 822
 823	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 824	if (error)
 825		goto out;
 826
 827	for (x = 0; x < num_qd; x++) {
 828		offset = qd2offset(qda[x]);
 829		if (gfs2_write_alloc_required(ip, offset,
 830					      sizeof(struct gfs2_quota)))
 831			nalloc++;
 832	}
 833
 834	/* 
 835	 * 1 blk for unstuffing inode if stuffed. We add this extra
 836	 * block to the reservation unconditionally. If the inode
 837	 * doesn't need unstuffing, the block will be released to the 
 838	 * rgrp since it won't be allocated during the transaction
 839	 */
 840	/* +3 in the end for unstuffing block, inode size update block
 841	 * and another block in case quota straddles page boundary and 
 842	 * two blocks need to be updated instead of 1 */
 843	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 844
 845	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 846	ap.target = reserved;
 847	error = gfs2_inplace_reserve(ip, &ap);
 848	if (error)
 849		goto out_alloc;
 850
 851	if (nalloc)
 852		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 853
 854	error = gfs2_trans_begin(sdp, blocks, 0);
 855	if (error)
 856		goto out_ipres;
 857
 858	for (x = 0; x < num_qd; x++) {
 859		qd = qda[x];
 860		offset = qd2offset(qd);
 861		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 862		if (error)
 863			goto out_end_trans;
 864
 865		do_qc(qd, -qd->qd_change_sync);
 866		set_bit(QDF_REFRESH, &qd->qd_flags);
 867	}
 868
 869	error = 0;
 870
 871out_end_trans:
 872	gfs2_trans_end(sdp);
 873out_ipres:
 874	gfs2_inplace_release(ip);
 875out_alloc:
 876	gfs2_glock_dq_uninit(&i_gh);
 877out:
 878	while (qx--)
 879		gfs2_glock_dq_uninit(&ghs[qx]);
 880	mutex_unlock(&ip->i_inode.i_mutex);
 881	kfree(ghs);
 882	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 883	return error;
 884}
 885
 886static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 887{
 888	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 889	struct gfs2_quota q;
 890	struct gfs2_quota_lvb *qlvb;
 891	loff_t pos;
 892	int error;
 893
 894	memset(&q, 0, sizeof(struct gfs2_quota));
 895	pos = qd2offset(qd);
 896	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 897	if (error < 0)
 898		return error;
 899
 900	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 901	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 902	qlvb->__pad = 0;
 903	qlvb->qb_limit = q.qu_limit;
 904	qlvb->qb_warn = q.qu_warn;
 905	qlvb->qb_value = q.qu_value;
 906	qd->qd_qb = *qlvb;
 907
 908	return 0;
 909}
 910
 911static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 912		    struct gfs2_holder *q_gh)
 913{
 914	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 915	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 916	struct gfs2_holder i_gh;
 917	int error;
 918
 919restart:
 920	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 921	if (error)
 922		return error;
 923
 924	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 925
 926	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 927		gfs2_glock_dq_uninit(q_gh);
 928		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 929					   GL_NOCACHE, q_gh);
 930		if (error)
 931			return error;
 932
 933		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 934		if (error)
 935			goto fail;
 936
 937		error = update_qd(sdp, qd);
 938		if (error)
 939			goto fail_gunlock;
 940
 941		gfs2_glock_dq_uninit(&i_gh);
 942		gfs2_glock_dq_uninit(q_gh);
 943		force_refresh = 0;
 944		goto restart;
 945	}
 946
 947	return 0;
 948
 949fail_gunlock:
 950	gfs2_glock_dq_uninit(&i_gh);
 951fail:
 952	gfs2_glock_dq_uninit(q_gh);
 953	return error;
 954}
 955
 956int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 957{
 958	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 959	struct gfs2_quota_data *qd;
 960	unsigned int x;
 961	int error = 0;
 962
 963	error = gfs2_quota_hold(ip, uid, gid);
 964	if (error)
 965		return error;
 966
 967	if (capable(CAP_SYS_RESOURCE) ||
 968	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 969		return 0;
 970
 971	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
 972	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 973
 974	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
 975		int force = NO_FORCE;
 976		qd = ip->i_res->rs_qa_qd[x];
 977		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 978			force = FORCE;
 979		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
 980		if (error)
 981			break;
 982	}
 983
 984	if (!error)
 985		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 986	else {
 987		while (x--)
 988			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
 989		gfs2_quota_unhold(ip);
 990	}
 991
 992	return error;
 993}
 994
 995static int need_sync(struct gfs2_quota_data *qd)
 996{
 997	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 998	struct gfs2_tune *gt = &sdp->sd_tune;
 999	s64 value;
1000	unsigned int num, den;
1001	int do_sync = 1;
1002
1003	if (!qd->qd_qb.qb_limit)
1004		return 0;
1005
1006	spin_lock(&qd_lock);
1007	value = qd->qd_change;
1008	spin_unlock(&qd_lock);
1009
1010	spin_lock(&gt->gt_spin);
1011	num = gt->gt_quota_scale_num;
1012	den = gt->gt_quota_scale_den;
1013	spin_unlock(&gt->gt_spin);
1014
1015	if (value < 0)
1016		do_sync = 0;
1017	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1018		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1019		do_sync = 0;
1020	else {
1021		value *= gfs2_jindex_size(sdp) * num;
1022		value = div_s64(value, den);
1023		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1024		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1025			do_sync = 0;
1026	}
1027
1028	return do_sync;
1029}
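A worked example of the check above, with assumed numbers: on a filesystem with 3 journals (nodes), the default quota_scale of 1 (num/den = 1/1), a locally pending change of 100 blocks, a cached LVB value of 900 blocks and a limit of 1000 blocks, the scaled estimate is 100 * 3 * 1 / 1 + 900 = 1200, which is not below the limit, so need_sync() returns 1 and the change is pushed to the quota file early. With only 20 pending blocks the estimate is 20 * 3 + 900 = 960, which is below the limit, so the entry simply waits for the periodic quota_quantum sync performed by gfs2_quotad().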
1030
1031void gfs2_quota_unlock(struct gfs2_inode *ip)
1032{
1033	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1034	struct gfs2_quota_data *qda[4];
1035	unsigned int count = 0;
1036	unsigned int x;
1037	int found;
1038
1039	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1040		goto out;
1041
1042	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1043		struct gfs2_quota_data *qd;
1044		int sync;
1045
1046		qd = ip->i_res->rs_qa_qd[x];
1047		sync = need_sync(qd);
1048
1049		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
1050		if (!sync)
1051			continue;
1052
1053		spin_lock(&qd_lock);
1054		found = qd_check_sync(sdp, qd, NULL);
1055		spin_unlock(&qd_lock);
1056
1057		if (!found)
1058			continue;
1059
1060		gfs2_assert_warn(sdp, qd->qd_change_sync);
1061		if (bh_get(qd)) {
1062			clear_bit(QDF_LOCKED, &qd->qd_flags);
1063			slot_put(qd);
1064			qd_put(qd);
1065			continue;
1066		}
1067
1068		qda[count++] = qd;
1069	}
1070
1071	if (count) {
1072		do_sync(count, qda);
1073		for (x = 0; x < count; x++)
1074			qd_unlock(qda[x]);
1075	}
1076
1077out:
1078	gfs2_quota_unhold(ip);
1079}
1080
1081#define MAX_LINE 256
1082
1083static int print_message(struct gfs2_quota_data *qd, char *type)
1084{
1085	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
1086
1087	fs_info(sdp, "quota %s for %s %u\n",
1088		type,
1089		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1090		from_kqid(&init_user_ns, qd->qd_id));
1091
1092	return 0;
1093}
1094
1095int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1096{
1097	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1098	struct gfs2_quota_data *qd;
1099	s64 value;
1100	unsigned int x;
1101	int error = 0;
1102
1103	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1104		return 0;
1105
1106        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1107                return 0;
1108
1109	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1110		qd = ip->i_res->rs_qa_qd[x];
1111
1112		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1113		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1114			continue;
1115
1116		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1117		spin_lock(&qd_lock);
1118		value += qd->qd_change;
1119		spin_unlock(&qd_lock);
1120
1121		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1122			print_message(qd, "exceeded");
1123			quota_send_warning(qd->qd_id,
1124					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1125
1126			error = -EDQUOT;
1127			break;
1128		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1129			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
1130			   time_after_eq(jiffies, qd->qd_last_warn +
1131					 gfs2_tune_get(sdp,
1132						gt_quota_warn_period) * HZ)) {
1133			quota_send_warning(qd->qd_id,
1134					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1135			error = print_message(qd, "warning");
1136			qd->qd_last_warn = jiffies;
1137		}
1138	}
1139
1140	return error;
1141}
1142
1143void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1144		       kuid_t uid, kgid_t gid)
1145{
1146	struct gfs2_quota_data *qd;
1147	unsigned int x;
1148
1149	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1150		return;
1151	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1152		return;
1153
1154	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1155		qd = ip->i_res->rs_qa_qd[x];
1156
1157		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1158		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1159			do_qc(qd, change);
1160		}
1161	}
1162}
1163
1164int gfs2_quota_sync(struct super_block *sb, int type)
1165{
1166	struct gfs2_sbd *sdp = sb->s_fs_info;
1167	struct gfs2_quota_data **qda;
1168	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
1169	unsigned int num_qd;
1170	unsigned int x;
1171	int error = 0;
1172
1173	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1174	if (!qda)
1175		return -ENOMEM;
1176
1177	mutex_lock(&sdp->sd_quota_sync_mutex);
1178	sdp->sd_quota_sync_gen++;
1179
1180	do {
1181		num_qd = 0;
1182
1183		for (;;) {
1184			error = qd_fish(sdp, qda + num_qd);
1185			if (error || !qda[num_qd])
1186				break;
1187			if (++num_qd == max_qd)
1188				break;
1189		}
1190
1191		if (num_qd) {
1192			if (!error)
1193				error = do_sync(num_qd, qda);
1194			if (!error)
1195				for (x = 0; x < num_qd; x++)
1196					qda[x]->qd_sync_gen =
1197						sdp->sd_quota_sync_gen;
1198
1199			for (x = 0; x < num_qd; x++)
1200				qd_unlock(qda[x]);
1201		}
1202	} while (!error && num_qd == max_qd);
1203
1204	mutex_unlock(&sdp->sd_quota_sync_mutex);
1205	kfree(qda);
1206
1207	return error;
1208}
1209
1210int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1211{
1212	struct gfs2_quota_data *qd;
1213	struct gfs2_holder q_gh;
1214	int error;
1215
1216	error = qd_get(sdp, qid, &qd);
1217	if (error)
1218		return error;
1219
1220	error = do_glock(qd, FORCE, &q_gh);
1221	if (!error)
1222		gfs2_glock_dq_uninit(&q_gh);
1223
1224	qd_put(qd);
1225	return error;
1226}
1227
1228int gfs2_quota_init(struct gfs2_sbd *sdp)
1229{
1230	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1231	u64 size = i_size_read(sdp->sd_qc_inode);
1232	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1233	unsigned int x, slot = 0;
1234	unsigned int found = 0;
1235	unsigned int hash;
1236	unsigned int bm_size;
1237	u64 dblock;
1238	u32 extlen = 0;
1239	int error;
1240
1241	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1242		return -EIO;
1243
1244	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1245	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1246	bm_size *= sizeof(unsigned long);
1247	error = -ENOMEM;
1248	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1249	if (sdp->sd_quota_bitmap == NULL)
1250		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1251						 __GFP_ZERO, PAGE_KERNEL);
1252	if (!sdp->sd_quota_bitmap)
1253		return error;
1254
1255	for (x = 0; x < blocks; x++) {
1256		struct buffer_head *bh;
1257		const struct gfs2_quota_change *qc;
1258		unsigned int y;
1259
1260		if (!extlen) {
1261			int new = 0;
1262			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1263			if (error)
1264				goto fail;
1265		}
1266		error = -EIO;
1267		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1268		if (!bh)
1269			goto fail;
1270		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1271			brelse(bh);
1272			goto fail;
1273		}
1274
1275		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1276		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1277		     y++, slot++) {
1278			struct gfs2_quota_data *qd;
1279			s64 qc_change = be64_to_cpu(qc->qc_change);
1280			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1281			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1282						USRQUOTA : GRPQUOTA;
1283			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1284						      be32_to_cpu(qc->qc_id));
1285			qc++;
1286			if (!qc_change)
1287				continue;
1288
1289			hash = gfs2_qd_hash(sdp, qc_id);
1290			qd = qd_alloc(hash, sdp, qc_id);
1291			if (qd == NULL) {
1292				brelse(bh);
1293				goto fail;
1294			}
1295
1296			set_bit(QDF_CHANGE, &qd->qd_flags);
1297			qd->qd_change = qc_change;
1298			qd->qd_slot = slot;
1299			qd->qd_slot_count = 1;
1300
1301			spin_lock(&qd_lock);
1302			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1303			list_add(&qd->qd_list, &sdp->sd_quota_list);
1304			atomic_inc(&sdp->sd_quota_count);
1305			spin_unlock(&qd_lock);
1306
1307			spin_lock_bucket(hash);
1308			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1309			spin_unlock_bucket(hash);
1310
1311			found++;
1312		}
1313
1314		brelse(bh);
1315		dblock++;
1316		extlen--;
1317	}
1318
1319	if (found)
1320		fs_info(sdp, "found %u quota changes\n", found);
1321
1322	return 0;
1323
1324fail:
1325	gfs2_quota_cleanup(sdp);
1326	return error;
1327}
1328
1329void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1330{
1331	struct list_head *head = &sdp->sd_quota_list;
1332	struct gfs2_quota_data *qd;
1333
1334	spin_lock(&qd_lock);
1335	while (!list_empty(head)) {
1336		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1337
1338		list_del(&qd->qd_list);
1339
1340		/* Also remove if this qd exists in the reclaim list */
1341		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1342		atomic_dec(&sdp->sd_quota_count);
1343		spin_unlock(&qd_lock);
1344
1345		spin_lock_bucket(qd->qd_hash);
1346		hlist_bl_del_rcu(&qd->qd_hlist);
1347		spin_unlock_bucket(qd->qd_hash);
1348
1349		gfs2_assert_warn(sdp, !qd->qd_change);
1350		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1351		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1352
1353		gfs2_glock_put(qd->qd_gl);
1354		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1355
1356		spin_lock(&qd_lock);
1357	}
1358	spin_unlock(&qd_lock);
1359
1360	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1361
1362	if (sdp->sd_quota_bitmap) {
1363		if (is_vmalloc_addr(sdp->sd_quota_bitmap))
1364			vfree(sdp->sd_quota_bitmap);
1365		else
1366			kfree(sdp->sd_quota_bitmap);
1367		sdp->sd_quota_bitmap = NULL;
1368	}
1369}
1370
1371static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1372{
1373	if (error == 0 || error == -EROFS)
1374		return;
1375	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1376		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1377}
1378
1379static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1380			       int (*fxn)(struct super_block *sb, int type),
1381			       unsigned long t, unsigned long *timeo,
1382			       unsigned int *new_timeo)
1383{
1384	if (t >= *timeo) {
1385		int error = fxn(sdp->sd_vfs, 0);
1386		quotad_error(sdp, msg, error);
1387		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1388	} else {
1389		*timeo -= t;
1390	}
1391}
1392
1393static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1394{
1395	struct gfs2_inode *ip;
1396
1397	while(1) {
1398		ip = NULL;
1399		spin_lock(&sdp->sd_trunc_lock);
1400		if (!list_empty(&sdp->sd_trunc_list)) {
1401			ip = list_entry(sdp->sd_trunc_list.next,
1402					struct gfs2_inode, i_trunc_list);
1403			list_del_init(&ip->i_trunc_list);
1404		}
1405		spin_unlock(&sdp->sd_trunc_lock);
1406		if (ip == NULL)
1407			return;
1408		gfs2_glock_finish_truncate(ip);
1409	}
1410}
1411
1412void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1413	if (!sdp->sd_statfs_force_sync) {
1414		sdp->sd_statfs_force_sync = 1;
1415		wake_up(&sdp->sd_quota_wait);
1416	}
1417}
1418
1419
1420/**
1421 * gfs2_quotad - Write cached quota changes into the quota file
1422 * @sdp: Pointer to GFS2 superblock
1423 *
1424 */
1425
1426int gfs2_quotad(void *data)
1427{
1428	struct gfs2_sbd *sdp = data;
1429	struct gfs2_tune *tune = &sdp->sd_tune;
1430	unsigned long statfs_timeo = 0;
1431	unsigned long quotad_timeo = 0;
1432	unsigned long t = 0;
1433	DEFINE_WAIT(wait);
1434	int empty;
1435
1436	while (!kthread_should_stop()) {
1437
1438		/* Update the master statfs file */
1439		if (sdp->sd_statfs_force_sync) {
1440			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1441			quotad_error(sdp, "statfs", error);
1442			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1443		}
1444		else
1445			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1446				   	   &statfs_timeo,
1447					   &tune->gt_statfs_quantum);
1448
1449		/* Update quota file */
1450		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1451				   &quotad_timeo, &tune->gt_quota_quantum);
1452
1453		/* Check for & recover partially truncated inodes */
1454		quotad_check_trunc_list(sdp);
1455
1456		try_to_freeze();
1457
1458		t = min(quotad_timeo, statfs_timeo);
1459
1460		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1461		spin_lock(&sdp->sd_trunc_lock);
1462		empty = list_empty(&sdp->sd_trunc_list);
1463		spin_unlock(&sdp->sd_trunc_lock);
1464		if (empty && !sdp->sd_statfs_force_sync)
1465			t -= schedule_timeout(t);
1466		else
1467			t = 0;
1468		finish_wait(&sdp->sd_quota_wait, &wait);
1469	}
1470
1471	return 0;
1472}
1473
1474static int gfs2_quota_get_xstate(struct super_block *sb,
1475				 struct fs_quota_stat *fqs)
1476{
1477	struct gfs2_sbd *sdp = sb->s_fs_info;
1478
1479	memset(fqs, 0, sizeof(struct fs_quota_stat));
1480	fqs->qs_version = FS_QSTAT_VERSION;
1481
1482	switch (sdp->sd_args.ar_quota) {
1483	case GFS2_QUOTA_ON:
1484		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
1485		/*FALLTHRU*/
1486	case GFS2_QUOTA_ACCOUNT:
1487		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
1488		break;
1489	case GFS2_QUOTA_OFF:
1490		break;
1491	}
1492
1493	if (sdp->sd_quota_inode) {
1494		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1495		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1496	}
1497	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 1498	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1499	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
1500	return 0;
1501}
1502
1503static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1504			  struct fs_disk_quota *fdq)
1505{
1506	struct gfs2_sbd *sdp = sb->s_fs_info;
1507	struct gfs2_quota_lvb *qlvb;
1508	struct gfs2_quota_data *qd;
1509	struct gfs2_holder q_gh;
1510	int error;
1511
1512	memset(fdq, 0, sizeof(struct fs_disk_quota));
1513
1514	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1515		return -ESRCH; /* Crazy XFS error code */
1516
1517	if ((qid.type != USRQUOTA) &&
1518	    (qid.type != GRPQUOTA))
1519		return -EINVAL;
1520
1521	error = qd_get(sdp, qid, &qd);
1522	if (error)
1523		return error;
1524	error = do_glock(qd, FORCE, &q_gh);
1525	if (error)
1526		goto out;
1527
1528	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1529	fdq->d_version = FS_DQUOT_VERSION;
1530	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1531	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
1532	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1533	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1534	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1535
1536	gfs2_glock_dq_uninit(&q_gh);
1537out:
1538	qd_put(qd);
1539	return error;
1540}
1541
1542/* GFS2 only supports a subset of the XFS fields */
1543#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1544
1545static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1546			  struct fs_disk_quota *fdq)
1547{
1548	struct gfs2_sbd *sdp = sb->s_fs_info;
1549	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1550	struct gfs2_quota_data *qd;
1551	struct gfs2_holder q_gh, i_gh;
1552	unsigned int data_blocks, ind_blocks;
1553	unsigned int blocks = 0;
1554	int alloc_required;
1555	loff_t offset;
1556	int error;
1557
1558	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1559		return -ESRCH; /* Crazy XFS error code */
1560
1561	if ((qid.type != USRQUOTA) &&
1562	    (qid.type != GRPQUOTA))
1563		return -EINVAL;
1564
1565	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1566		return -EINVAL;
1567
1568	error = qd_get(sdp, qid, &qd);
1569	if (error)
1570		return error;
1571
1572	error = gfs2_rs_alloc(ip);
1573	if (error)
1574		goto out_put;
1575
1576	mutex_lock(&ip->i_inode.i_mutex);
1577	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1578	if (error)
1579		goto out_unlockput;
1580	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1581	if (error)
1582		goto out_q;
1583
1584	/* Check for existing entry, if none then alloc new blocks */
1585	error = update_qd(sdp, qd);
1586	if (error)
1587		goto out_i;
1588
1589	/* If nothing has changed, this is a no-op */
1590	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1591	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1592		fdq->d_fieldmask ^= FS_DQ_BSOFT;
1593
1594	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1595	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1596		fdq->d_fieldmask ^= FS_DQ_BHARD;
1597
1598	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1599	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1600		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1601
1602	if (fdq->d_fieldmask == 0)
1603		goto out_i;
1604
1605	offset = qd2offset(qd);
1606	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1607	if (gfs2_is_stuffed(ip))
1608		alloc_required = 1;
1609	if (alloc_required) {
1610		struct gfs2_alloc_parms ap = { .aflags = 0, };
1611		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1612				       &data_blocks, &ind_blocks);
1613		blocks = 1 + data_blocks + ind_blocks;
1614		ap.target = blocks;
1615		error = gfs2_inplace_reserve(ip, &ap);
1616		if (error)
1617			goto out_i;
1618		blocks += gfs2_rg_blocks(ip, blocks);
1619	}
1620
 1621	/* Some quotas span block boundaries and can update two blocks, so
 1622	   add an extra block to the transaction to handle such quotas */
1623	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1624	if (error)
1625		goto out_release;
1626
1627	/* Apply changes */
1628	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1629
1630	gfs2_trans_end(sdp);
1631out_release:
1632	if (alloc_required)
1633		gfs2_inplace_release(ip);
1634out_i:
1635	gfs2_glock_dq_uninit(&i_gh);
1636out_q:
1637	gfs2_glock_dq_uninit(&q_gh);
1638out_unlockput:
1639	mutex_unlock(&ip->i_inode.i_mutex);
1640out_put:
1641	qd_put(qd);
1642	return error;
1643}
1644
1645const struct quotactl_ops gfs2_quotactl_ops = {
1646	.quota_sync     = gfs2_quota_sync,
1647	.get_xstate     = gfs2_quota_get_xstate,
1648	.get_dqblk	= gfs2_get_dqblk,
1649	.set_dqblk	= gfs2_set_dqblk,
1650};
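Since these operations implement the XFS-style quotactl interface, the values that gfs2_get_dqblk() fills in can be read from user space with Q_XGETQUOTA. A rough user-space sketch, assuming the standard quotactl(2) call and the fs_disk_quota layout from <linux/dqblk_xfs.h>; the device path, UID and the example_* name are placeholders:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/quota.h>
	#include <linux/dqblk_xfs.h>	/* struct fs_disk_quota, Q_XGETQUOTA */

	/* Sketch: print the usage and limits that gfs2_get_dqblk() reports. */
	static int example_print_user_quota(const char *blockdev, int uid)
	{
		struct fs_disk_quota fdq;

		if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), blockdev, uid,
			     (caddr_t)&fdq) < 0) {
			perror("quotactl");
			return -1;
		}

		/* The d_blk_* fields are in 512-byte units, matching the
		   sd_fsb2bb_shift conversions in the kernel code above. */
		printf("uid %d: used %llu, soft %llu, hard %llu\n", uid,
		       (unsigned long long)fdq.d_bcount,
		       (unsigned long long)fdq.d_blk_softlimit,
		       (unsigned long long)fdq.d_blk_hardlimit);
		return 0;
	}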
1651
1652void __init gfs2_quota_hash_init(void)
1653{
1654	unsigned i;
1655
1656	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1657		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1658}
fs/gfs2/quota.c (Linux v3.5.6)
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
   17 * multiple nodes to overrun their quota, but that overrun is controllable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
   21 * There are a couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
   24 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
   29 * (with an infinite number of nodes, each with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
  38
  39#include <linux/sched.h>
  40#include <linux/slab.h>
  41#include <linux/mm.h>
  42#include <linux/spinlock.h>
  43#include <linux/completion.h>
  44#include <linux/buffer_head.h>
  45#include <linux/sort.h>
  46#include <linux/fs.h>
  47#include <linux/bio.h>
  48#include <linux/gfs2_ondisk.h>
  49#include <linux/kthread.h>
  50#include <linux/freezer.h>
  51#include <linux/quota.h>
  52#include <linux/dqblk_xfs.h>
  53
  54#include "gfs2.h"
  55#include "incore.h"
  56#include "bmap.h"
  57#include "glock.h"
  58#include "glops.h"
  59#include "log.h"
  60#include "meta_io.h"
  61#include "quota.h"
  62#include "rgrp.h"
  63#include "super.h"
  64#include "trans.h"
  65#include "inode.h"
  66#include "util.h"
  67
  68#define QUOTA_USER 1
  69#define QUOTA_GROUP 0
  70
  71struct gfs2_quota_change_host {
  72	u64 qc_change;
  73	u32 qc_flags; /* GFS2_QCF_... */
  74	u32 qc_id;
  75};
  76
  77static LIST_HEAD(qd_lru_list);
  78static atomic_t qd_lru_count = ATOMIC_INIT(0);
  79static DEFINE_SPINLOCK(qd_lru_lock);
  80
  81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
  82{
  83	struct gfs2_quota_data *qd;
  84	struct gfs2_sbd *sdp;
  85	int nr_to_scan = sc->nr_to_scan;
  86
  87	if (nr_to_scan == 0)
  88		goto out;
  89
  90	if (!(sc->gfp_mask & __GFP_FS))
  91		return -1;
  92
  93	spin_lock(&qd_lru_lock);
  94	while (nr_to_scan && !list_empty(&qd_lru_list)) {
  95		qd = list_entry(qd_lru_list.next,
  96				struct gfs2_quota_data, qd_reclaim);
  97		sdp = qd->qd_gl->gl_sbd;
  98
  99		/* Free from the filesystem-specific list */
 100		list_del(&qd->qd_list);
 101
 102		gfs2_assert_warn(sdp, !qd->qd_change);
 103		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 104		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 105
 106		gfs2_glock_put(qd->qd_gl);
 107		atomic_dec(&sdp->sd_quota_count);
 108
 109		/* Delete it from the common reclaim list */
 110		list_del_init(&qd->qd_reclaim);
 111		atomic_dec(&qd_lru_count);
 112		spin_unlock(&qd_lru_lock);
 113		kmem_cache_free(gfs2_quotad_cachep, qd);
 114		spin_lock(&qd_lru_lock);
 115		nr_to_scan--;
 116	}
 117	spin_unlock(&qd_lru_lock);
 118
 119out:
 120	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
 121}
 122
 123static u64 qd2offset(struct gfs2_quota_data *qd)
 124{
 125	u64 offset;
 126
 127	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
 128	offset *= sizeof(struct gfs2_quota);
 129
 130	return offset;
 131}
 132
 133static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
 134		    struct gfs2_quota_data **qdp)
 135{
 136	struct gfs2_quota_data *qd;
 137	int error;
 138
 139	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 140	if (!qd)
 141		return -ENOMEM;
 142
 143	atomic_set(&qd->qd_count, 1);
 144	qd->qd_id = id;
 145	if (user)
 146		set_bit(QDF_USER, &qd->qd_flags);
 147	qd->qd_slot = -1;
 148	INIT_LIST_HEAD(&qd->qd_reclaim);
 149
 150	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
 151			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 152	if (error)
 153		goto fail;
 154
 155	*qdp = qd;
 156
 157	return 0;
 158
 159fail:
 160	kmem_cache_free(gfs2_quotad_cachep, qd);
 161	return error;
 162}
 163
 164static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 165		  struct gfs2_quota_data **qdp)
 166{
 167	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
 168	int error, found;
 169
 170	*qdp = NULL;
 171
 172	for (;;) {
 173		found = 0;
 174		spin_lock(&qd_lru_lock);
 175		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 176			if (qd->qd_id == id &&
 177			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
 178				if (!atomic_read(&qd->qd_count) &&
 179				    !list_empty(&qd->qd_reclaim)) {
 180					/* Remove it from reclaim list */
 181					list_del_init(&qd->qd_reclaim);
 182					atomic_dec(&qd_lru_count);
 183				}
 184				atomic_inc(&qd->qd_count);
 185				found = 1;
 186				break;
 187			}
 188		}
 189
 190		if (!found)
 191			qd = NULL;
 192
 193		if (!qd && new_qd) {
 194			qd = new_qd;
 195			list_add(&qd->qd_list, &sdp->sd_quota_list);
 196			atomic_inc(&sdp->sd_quota_count);
 197			new_qd = NULL;
 198		}
 199
 200		spin_unlock(&qd_lru_lock);
 201
 202		if (qd) {
 203			if (new_qd) {
 204				gfs2_glock_put(new_qd->qd_gl);
 205				kmem_cache_free(gfs2_quotad_cachep, new_qd);
 206			}
 207			*qdp = qd;
 208			return 0;
 209		}
 210
 211		error = qd_alloc(sdp, user, id, &new_qd);
 212		if (error)
 213			return error;
 214	}
 215}
 216
 217static void qd_hold(struct gfs2_quota_data *qd)
 218{
 219	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 220	gfs2_assert(sdp, atomic_read(&qd->qd_count));
 221	atomic_inc(&qd->qd_count);
 222}
 223
 224static void qd_put(struct gfs2_quota_data *qd)
 225{
 226	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
 227		/* Add to the reclaim list */
 228		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
 229		atomic_inc(&qd_lru_count);
 230		spin_unlock(&qd_lru_lock);
 231	}
 232}
 233
 234static int slot_get(struct gfs2_quota_data *qd)
 235{
 236	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 237	unsigned int c, o = 0, b;
 238	unsigned char byte = 0;
 239
 240	spin_lock(&qd_lru_lock);
 241
 242	if (qd->qd_slot_count++) {
 243		spin_unlock(&qd_lru_lock);
 244		return 0;
 245	}
 246
 247	for (c = 0; c < sdp->sd_quota_chunks; c++)
 248		for (o = 0; o < PAGE_SIZE; o++) {
 249			byte = sdp->sd_quota_bitmap[c][o];
 250			if (byte != 0xFF)
 251				goto found;
 252		}
 253
 254	goto fail;
 255
 256found:
 257	for (b = 0; b < 8; b++)
 258		if (!(byte & (1 << b)))
 259			break;
 260	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
 261
 262	if (qd->qd_slot >= sdp->sd_quota_slots)
 263		goto fail;
 264
 265	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 266
 267	spin_unlock(&qd_lru_lock);
 268
 269	return 0;
 270
 271fail:
 272	qd->qd_slot_count--;
 273	spin_unlock(&qd_lru_lock);
 274	return -ENOSPC;
 275}
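In this older version the slot bitmap is an array of page-sized chunks (sdp->sd_quota_bitmap[c][o] above), so a slot number decomposes into a chunk, a byte within the chunk and a bit within the byte. A worked example with an assumed 4096-byte PAGE_SIZE: slot = c * (8 * PAGE_SIZE) + o * 8 + b, so slot 32771 corresponds to chunk c = 1 (32771 / 32768), byte o = 0 and bit b = 3 (the remainder 3 within the chunk), i.e. the bit tested is sd_quota_bitmap[1][0] & (1 << 3).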
 276
 277static void slot_hold(struct gfs2_quota_data *qd)
 278{
 279	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 280
 281	spin_lock(&qd_lru_lock);
 282	gfs2_assert(sdp, qd->qd_slot_count);
 283	qd->qd_slot_count++;
 284	spin_unlock(&qd_lru_lock);
 285}
 286
 287static void slot_put(struct gfs2_quota_data *qd)
 288{
 289	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 290
 291	spin_lock(&qd_lru_lock);
 292	gfs2_assert(sdp, qd->qd_slot_count);
 293	if (!--qd->qd_slot_count) {
 294		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 295		qd->qd_slot = -1;
 296	}
 297	spin_unlock(&qd_lru_lock);
 298}
 299
 300static int bh_get(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 303	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 304	unsigned int block, offset;
 305	struct buffer_head *bh;
 306	int error;
 307	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 308
 309	mutex_lock(&sdp->sd_quota_mutex);
 310
 311	if (qd->qd_bh_count++) {
 312		mutex_unlock(&sdp->sd_quota_mutex);
 313		return 0;
 314	}
 315
 316	block = qd->qd_slot / sdp->sd_qc_per_block;
 317	offset = qd->qd_slot % sdp->sd_qc_per_block;
 318
 319	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 320	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 321	if (error)
 322		goto fail;
 323	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
 324	if (error)
 325		goto fail;
 326	error = -EIO;
 327	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 328		goto fail_brelse;
 329
 330	qd->qd_bh = bh;
 331	qd->qd_bh_qc = (struct gfs2_quota_change *)
 332		(bh->b_data + sizeof(struct gfs2_meta_header) +
 333		 offset * sizeof(struct gfs2_quota_change));
 334
 335	mutex_unlock(&sdp->sd_quota_mutex);
 336
 337	return 0;
 338
 339fail_brelse:
 340	brelse(bh);
 341fail:
 342	qd->qd_bh_count--;
 343	mutex_unlock(&sdp->sd_quota_mutex);
 344	return error;
 345}
 346
 347static void bh_put(struct gfs2_quota_data *qd)
 348{
 349	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 350
 351	mutex_lock(&sdp->sd_quota_mutex);
 352	gfs2_assert(sdp, qd->qd_bh_count);
 353	if (!--qd->qd_bh_count) {
 354		brelse(qd->qd_bh);
 355		qd->qd_bh = NULL;
 356		qd->qd_bh_qc = NULL;
 357	}
 358	mutex_unlock(&sdp->sd_quota_mutex);
 359}
 360
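/**
 * qd_fish - find the next quota data structure that needs syncing
 * @sdp: The GFS2 superblock
 * @qdp: Used to return the locked qd, or NULL if nothing was found
 *
 * Picks a qd from the superblock's quota list that has outstanding
 * changes and has not yet been synced in the current generation, marks
 * it QDF_LOCKED and pins its quota change buffer.
 *
 * Returns: 0 (even if nothing was found) or a negative error code
 */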
 361static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 362{
 363	struct gfs2_quota_data *qd = NULL;
 364	int error;
 365	int found = 0;
 366
 367	*qdp = NULL;
 368
 369	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 370		return 0;
 371
 372	spin_lock(&qd_lru_lock);
 373
 374	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 375		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 376		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 377		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
 378			continue;
 379
 380		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 381
 382		set_bit(QDF_LOCKED, &qd->qd_flags);
 383		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 384		atomic_inc(&qd->qd_count);
 385		qd->qd_change_sync = qd->qd_change;
 386		gfs2_assert_warn(sdp, qd->qd_slot_count);
 387		qd->qd_slot_count++;
 388		found = 1;
 389
 390		break;
 391	}
 392
 393	if (!found)
 394		qd = NULL;
 395
 396	spin_unlock(&qd_lru_lock);
 397
 398	if (qd) {
 399		gfs2_assert_warn(sdp, qd->qd_change_sync);
 400		error = bh_get(qd);
 401		if (error) {
 402			clear_bit(QDF_LOCKED, &qd->qd_flags);
 403			slot_put(qd);
 404			qd_put(qd);
 405			return error;
 406		}
 407	}
 408
 409	*qdp = qd;
 410
 411	return 0;
 412}
 413
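/* qd_trylock - try to lock one specific qd for syncing; returns 1 on success */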
 414static int qd_trylock(struct gfs2_quota_data *qd)
 415{
 416	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 417
 418	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 419		return 0;
 420
 421	spin_lock(&qd_lru_lock);
 422
 423	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 424	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 425		spin_unlock(&qd_lru_lock);
 426		return 0;
 427	}
 428
 429	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 430
 431	set_bit(QDF_LOCKED, &qd->qd_flags);
 432	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 433	atomic_inc(&qd->qd_count);
 434	qd->qd_change_sync = qd->qd_change;
 435	gfs2_assert_warn(sdp, qd->qd_slot_count);
 436	qd->qd_slot_count++;
 437
 438	spin_unlock(&qd_lru_lock);
 439
 440	gfs2_assert_warn(sdp, qd->qd_change_sync);
 441	if (bh_get(qd)) {
 442		clear_bit(QDF_LOCKED, &qd->qd_flags);
 443		slot_put(qd);
 444		qd_put(qd);
 445		return 0;
 446	}
 447
 448	return 1;
 449}
 450
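/* qd_unlock - undo the locking and references taken by qd_fish()/qd_trylock() */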
 451static void qd_unlock(struct gfs2_quota_data *qd)
 452{
 453	gfs2_assert_warn(qd->qd_gl->gl_sbd,
 454			 test_bit(QDF_LOCKED, &qd->qd_flags));
 455	clear_bit(QDF_LOCKED, &qd->qd_flags);
 456	bh_put(qd);
 457	slot_put(qd);
 458	qd_put(qd);
 459}
 460
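/* qdsb_get - get a qd together with its quota change slot and buffer */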
 461static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
 462		    struct gfs2_quota_data **qdp)
 463{
 464	int error;
 465
 466	error = qd_get(sdp, user, id, qdp);
 467	if (error)
 468		return error;
 469
 470	error = slot_get(*qdp);
 471	if (error)
 472		goto fail;
 473
 474	error = bh_get(*qdp);
 475	if (error)
 476		goto fail_slot;
 477
 478	return 0;
 479
 480fail_slot:
 481	slot_put(*qdp);
 482fail:
 483	qd_put(*qdp);
 484	return error;
 485}
 486
 487static void qdsb_put(struct gfs2_quota_data *qd)
 488{
 489	bh_put(qd);
 490	slot_put(qd);
 491	qd_put(qd);
 492}
 493
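/**
 * gfs2_quota_hold - acquire the quota data needed for a change to an inode
 * @ip: The inode
 * @uid: A new owner, or NO_QUOTA_CHANGE
 * @gid: A new group, or NO_QUOTA_CHANGE
 *
 * References the user and group quota data for the inode's current
 * owner and, where they differ, for the new uid/gid as well.
 *
 * Returns: 0 on success or a negative error code
 */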
 494int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 495{
 496	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 497	struct gfs2_qadata *qa = ip->i_qadata;
 498	struct gfs2_quota_data **qd = qa->qa_qd;
 499	int error;
 500
 501	if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
 502	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 503		return -EIO;
 504
 505	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 506		return 0;
 507
 508	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
 509	if (error)
 510		goto out;
 511	qa->qa_qd_num++;
 512	qd++;
 513
 514	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
 515	if (error)
 516		goto out;
 517	qa->qa_qd_num++;
 518	qd++;
 519
 520	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
 521		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
 522		if (error)
 523			goto out;
 524		qa->qa_qd_num++;
 525		qd++;
 526	}
 527
 528	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
 529		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
 530		if (error)
 531			goto out;
 532		qa->qa_qd_num++;
 533		qd++;
 534	}
 535
 536out:
 537	if (error)
 538		gfs2_quota_unhold(ip);
 539	return error;
 540}
 541
 542void gfs2_quota_unhold(struct gfs2_inode *ip)
 543{
 544	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 545	struct gfs2_qadata *qa = ip->i_qadata;
 546	unsigned int x;
 547
 548	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 549
 550	for (x = 0; x < qa->qa_qd_num; x++) {
 551		qdsb_put(qa->qa_qd[x]);
 552		qa->qa_qd[x] = NULL;
 553	}
 554	qa->qa_qd_num = 0;
 555}
 556
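/* sort_qd - sort() comparator: user quotas before group quotas, then by ID */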
 557static int sort_qd(const void *a, const void *b)
 558{
 559	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 560	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 561
 562	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
 563	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
 564		if (test_bit(QDF_USER, &qd_a->qd_flags))
 565			return -1;
 566		else
 567			return 1;
 568	}
 569	if (qd_a->qd_id < qd_b->qd_id)
 570		return -1;
 571	if (qd_a->qd_id > qd_b->qd_id)
 572		return 1;
 573
 574	return 0;
 575}
 576
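/**
 * do_qc - record a local quota change in the per-node quota change file
 * @qd: The quota data
 * @change: The change in the number of blocks
 *
 * Updates the on-disk quota change entry for this ID and caches the new
 * total in qd_change.  The first change against an ID takes extra qd and
 * slot references; when the total returns to zero they are dropped.
 */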
 577static void do_qc(struct gfs2_quota_data *qd, s64 change)
 578{
 579	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 580	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 581	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 582	s64 x;
 583
 584	mutex_lock(&sdp->sd_quota_mutex);
 585	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
 586
 587	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 588		qc->qc_change = 0;
 589		qc->qc_flags = 0;
 590		if (test_bit(QDF_USER, &qd->qd_flags))
 591			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 592		qc->qc_id = cpu_to_be32(qd->qd_id);
 593	}
 594
 595	x = be64_to_cpu(qc->qc_change) + change;
 596	qc->qc_change = cpu_to_be64(x);
 597
 598	spin_lock(&qd_lru_lock);
 599	qd->qd_change = x;
 600	spin_unlock(&qd_lru_lock);
 601
 602	if (!x) {
 603		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 604		clear_bit(QDF_CHANGE, &qd->qd_flags);
 605		qc->qc_flags = 0;
 606		qc->qc_id = 0;
 607		slot_put(qd);
 608		qd_put(qd);
 609	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 610		qd_hold(qd);
 611		slot_hold(qd);
 612	}
 613
 614	mutex_unlock(&sdp->sd_quota_mutex);
 615}
 616
 617/**
 618 * gfs2_adjust_quota - adjust record of current block usage
 619 * @ip: The quota inode
 620 * @loc: Offset of the entry in the quota file
 621 * @change: The amount of usage change to record
 622 * @qd: The quota data
 623 * @fdq: The updated limits to record
 624 *
 625 * This function was mostly borrowed from gfs2_block_truncate_page which was
 626 * in turn mostly borrowed from ext3
 627 *
 628 * Returns: 0 or -ve on error
 629 */
 630
 631static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 632			     s64 change, struct gfs2_quota_data *qd,
 633			     struct fs_disk_quota *fdq)
 634{
 635	struct inode *inode = &ip->i_inode;
 636	struct gfs2_sbd *sdp = GFS2_SB(inode);
 637	struct address_space *mapping = inode->i_mapping;
 638	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 639	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 640	unsigned blocksize, iblock, pos;
 641	struct buffer_head *bh;
 642	struct page *page;
 643	void *kaddr, *ptr;
 644	struct gfs2_quota q, *qp;
 645	int err, nbytes;
 646	u64 size;
 647
 648	if (gfs2_is_stuffed(ip)) {
 649		err = gfs2_unstuff_dinode(ip, NULL);
 650		if (err)
 651			return err;
 652	}
 653
 654	memset(&q, 0, sizeof(struct gfs2_quota));
 655	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 656	if (err < 0)
 657		return err;
 658
 659	err = -EIO;
 660	qp = &q;
 661	qp->qu_value = be64_to_cpu(qp->qu_value);
 662	qp->qu_value += change;
 663	qp->qu_value = cpu_to_be64(qp->qu_value);
 664	qd->qd_qb.qb_value = qp->qu_value;
 665	if (fdq) {
 666		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
 667			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
 668			qd->qd_qb.qb_warn = qp->qu_warn;
 669		}
 670		if (fdq->d_fieldmask & FS_DQ_BHARD) {
 671			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
 672			qd->qd_qb.qb_limit = qp->qu_limit;
 673		}
 674		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
 675			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
 676			qd->qd_qb.qb_value = qp->qu_value;
 677		}
 678	}
 679
 680	/* Write the quota into the quota file on disk */
 681	ptr = qp;
 682	nbytes = sizeof(struct gfs2_quota);
 683get_a_page:
 684	page = find_or_create_page(mapping, index, GFP_NOFS);
 685	if (!page)
 686		return -ENOMEM;
 687
 688	blocksize = inode->i_sb->s_blocksize;
 689	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 690
 691	if (!page_has_buffers(page))
 692		create_empty_buffers(page, blocksize, 0);
 693
 694	bh = page_buffers(page);
 695	pos = blocksize;
 696	while (offset >= pos) {
 697		bh = bh->b_this_page;
 698		iblock++;
 699		pos += blocksize;
 700	}
 701
 702	if (!buffer_mapped(bh)) {
 703		gfs2_block_map(inode, iblock, bh, 1);
 704		if (!buffer_mapped(bh))
 705			goto unlock_out;
 706		/* If it's a newly allocated disk block for quota, zero it */
 707		if (buffer_new(bh))
 708			zero_user(page, pos - blocksize, bh->b_size);
 709	}
 710
 711	if (PageUptodate(page))
 712		set_buffer_uptodate(bh);
 713
 714	if (!buffer_uptodate(bh)) {
 715		ll_rw_block(READ | REQ_META, 1, &bh);
 716		wait_on_buffer(bh);
 717		if (!buffer_uptodate(bh))
 718			goto unlock_out;
 719	}
 720
 721	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 722
 723	kaddr = kmap_atomic(page);
 724	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 725		nbytes = PAGE_CACHE_SIZE - offset;
 726	memcpy(kaddr + offset, ptr, nbytes);
 727	flush_dcache_page(page);
 728	kunmap_atomic(kaddr);
 729	unlock_page(page);
 730	page_cache_release(page);
 731
  732	/* If the quota straddles a page boundary, we need to update the rest
  733	 * of the quota at the beginning of the next page */
 734	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
 735		ptr = ptr + nbytes;
 736		nbytes = sizeof(struct gfs2_quota) - nbytes;
 737		offset = 0;
 738		index++;
 739		goto get_a_page;
 740	}
 741
 742	size = loc + sizeof(struct gfs2_quota);
 743	if (size > inode->i_size)
 744		i_size_write(inode, size);
 745	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 746	mark_inode_dirty(inode);
 747	return 0;
 748
 749unlock_out:
 750	unlock_page(page);
 751	page_cache_release(page);
 752	return err;
 753}
 754
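/**
 * do_sync - write a batch of accumulated quota changes into the quota file
 * @num_qd: The number of quota data structures in @qda
 * @qda: The quota data to sync
 *
 * Takes each quota glock exclusively, adjusts the corresponding entry in
 * the shared quota file by the locally accumulated change, and then zeroes
 * that change in the per-node quota change file.
 *
 * Returns: 0 on success or a negative error code
 */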
 755static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 756{
 757	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 758	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 759	unsigned int data_blocks, ind_blocks;
 760	struct gfs2_holder *ghs, i_gh;
 761	unsigned int qx, x;
 762	struct gfs2_quota_data *qd;
 763	loff_t offset;
 764	unsigned int nalloc = 0, blocks;
 765	int error;
 766
 767	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 768			      &data_blocks, &ind_blocks);
 769
 770	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 771	if (!ghs)
 772		return -ENOMEM;
 773
 774	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 775	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 776	for (qx = 0; qx < num_qd; qx++) {
 777		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 778					   GL_NOCACHE, &ghs[qx]);
 779		if (error)
 780			goto out;
 781	}
 782
 783	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 784	if (error)
 785		goto out;
 786
 787	for (x = 0; x < num_qd; x++) {
 788		offset = qd2offset(qda[x]);
 789		if (gfs2_write_alloc_required(ip, offset,
 790					      sizeof(struct gfs2_quota)))
 791			nalloc++;
 792	}
 793
  794	/*
  795	 * 1 blk for unstuffing inode if stuffed. We add this extra
  796	 * block to the reservation unconditionally. If the inode
  797	 * doesn't need unstuffing, the block will be released to the
  798	 * rgrp since it won't be allocated during the transaction.
  799	 */
  800	/* The +3 at the end is for the unstuffing block, the inode size update
  801	 * block, and an extra block in case the quota straddles a page boundary
  802	 * and two blocks need to be updated instead of one. */
 803	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 804
 805	error = gfs2_inplace_reserve(ip, 1 +
 806				     (nalloc * (data_blocks + ind_blocks)));
 807	if (error)
 808		goto out_alloc;
 809
 810	if (nalloc)
 811		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
 812
 813	error = gfs2_trans_begin(sdp, blocks, 0);
 814	if (error)
 815		goto out_ipres;
 816
 817	for (x = 0; x < num_qd; x++) {
 818		qd = qda[x];
 819		offset = qd2offset(qd);
 820		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 821		if (error)
 822			goto out_end_trans;
 823
 824		do_qc(qd, -qd->qd_change_sync);
 825		set_bit(QDF_REFRESH, &qd->qd_flags);
 826	}
 827
 828	error = 0;
 829
 830out_end_trans:
 831	gfs2_trans_end(sdp);
 832out_ipres:
 833	gfs2_inplace_release(ip);
 834out_alloc:
 835	gfs2_glock_dq_uninit(&i_gh);
 836out:
 837	while (qx--)
 838		gfs2_glock_dq_uninit(&ghs[qx]);
 839	mutex_unlock(&ip->i_inode.i_mutex);
 840	kfree(ghs);
 841	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 842	return error;
 843}
 844
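/* update_qd - rebuild the quota LVB from the entry in the quota file */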
 845static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 846{
 847	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 848	struct gfs2_quota q;
 849	struct gfs2_quota_lvb *qlvb;
 850	loff_t pos;
 851	int error;
 852
 853	memset(&q, 0, sizeof(struct gfs2_quota));
 854	pos = qd2offset(qd);
 855	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 856	if (error < 0)
 857		return error;
 858
 859	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 860	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 861	qlvb->__pad = 0;
 862	qlvb->qb_limit = q.qu_limit;
 863	qlvb->qb_warn = q.qu_warn;
 864	qlvb->qb_value = q.qu_value;
 865	qd->qd_qb = *qlvb;
 866
 867	return 0;
 868}
 869
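/**
 * do_glock - acquire the glock protecting a quota data structure
 * @qd: The quota data
 * @force_refresh: Re-read the quota file even if the LVB looks valid
 * @q_gh: The holder to use
 *
 * A shared hold is normally sufficient, since the LVB caches the quota
 * values.  If the LVB has not been initialised (or a refresh is forced),
 * the lock is retaken exclusively so the LVB can be rebuilt from disk.
 */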
 870static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 871		    struct gfs2_holder *q_gh)
 872{
 873	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 874	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 875	struct gfs2_holder i_gh;
 876	int error;
 877
 878restart:
 879	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 880	if (error)
 881		return error;
 882
 883	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 884
 885	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 886		gfs2_glock_dq_uninit(q_gh);
 887		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 888					   GL_NOCACHE, q_gh);
 889		if (error)
 890			return error;
 891
 892		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 893		if (error)
 894			goto fail;
 895
 896		error = update_qd(sdp, qd);
 897		if (error)
 898			goto fail_gunlock;
 899
 900		gfs2_glock_dq_uninit(&i_gh);
 901		gfs2_glock_dq_uninit(q_gh);
 902		force_refresh = 0;
 903		goto restart;
 904	}
 905
 906	return 0;
 907
 908fail_gunlock:
 909	gfs2_glock_dq_uninit(&i_gh);
 910fail:
 911	gfs2_glock_dq_uninit(q_gh);
 912	return error;
 913}
 914
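/**
 * gfs2_quota_lock - lock the quota data needed for a change to an inode
 * @ip: The inode
 * @uid: A new owner, or NO_QUOTA_CHANGE
 * @gid: A new group, or NO_QUOTA_CHANGE
 *
 * Holds the relevant quota data and takes a shared glock on each so that
 * the cached limits are up to date for later quota checks.
 */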
 915int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
 916{
 917	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 918	struct gfs2_qadata *qa = ip->i_qadata;
 919	struct gfs2_quota_data *qd;
 920	unsigned int x;
 921	int error = 0;
 922
 923	error = gfs2_quota_hold(ip, uid, gid);
 924	if (error)
 925		return error;
 926
 927	if (capable(CAP_SYS_RESOURCE) ||
 928	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 929		return 0;
 930
 931	sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
 932	     sort_qd, NULL);
 933
 934	for (x = 0; x < qa->qa_qd_num; x++) {
 935		int force = NO_FORCE;
 936		qd = qa->qa_qd[x];
 937		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 938			force = FORCE;
 939		error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
 940		if (error)
 941			break;
 942	}
 943
 944	if (!error)
 945		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 946	else {
 947		while (x--)
 948			gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
 949		gfs2_quota_unhold(ip);
 950	}
 951
 952	return error;
 953}
 954
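/**
 * need_sync - decide whether a quota change should be synced now
 * @qd: The quota data
 *
 * Returns 1 when the locally accumulated change, scaled by the number of
 * journals and the quota scale factor, would take the cached usage up to
 * the limit; 0 otherwise.
 */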
 955static int need_sync(struct gfs2_quota_data *qd)
 956{
 957	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 958	struct gfs2_tune *gt = &sdp->sd_tune;
 959	s64 value;
 960	unsigned int num, den;
 961	int do_sync = 1;
 962
 963	if (!qd->qd_qb.qb_limit)
 964		return 0;
 965
 966	spin_lock(&qd_lru_lock);
 967	value = qd->qd_change;
 968	spin_unlock(&qd_lru_lock);
 969
 970	spin_lock(&gt->gt_spin);
 971	num = gt->gt_quota_scale_num;
 972	den = gt->gt_quota_scale_den;
 973	spin_unlock(&gt->gt_spin);
 974
 975	if (value < 0)
 976		do_sync = 0;
 977	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
 978		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 979		do_sync = 0;
 980	else {
 981		value *= gfs2_jindex_size(sdp) * num;
 982		value = div_s64(value, den);
 983		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
 984		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 985			do_sync = 0;
 986	}
 987
 988	return do_sync;
 989}
 990
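/**
 * gfs2_quota_unlock - release the quota locks taken by gfs2_quota_lock()
 * @ip: The inode
 *
 * Any held quota data whose accumulated change now warrants it (see
 * need_sync()) is written back to the quota file before the references
 * are dropped.
 */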
 991void gfs2_quota_unlock(struct gfs2_inode *ip)
 992{
 993	struct gfs2_qadata *qa = ip->i_qadata;
 994	struct gfs2_quota_data *qda[4];
 995	unsigned int count = 0;
 996	unsigned int x;
 997
 998	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
 999		goto out;
1000
1001	for (x = 0; x < qa->qa_qd_num; x++) {
1002		struct gfs2_quota_data *qd;
1003		int sync;
1004
1005		qd = qa->qa_qd[x];
1006		sync = need_sync(qd);
1007
1008		gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
1009
1010		if (sync && qd_trylock(qd))
1011			qda[count++] = qd;
1012	}
1013
1014	if (count) {
1015		do_sync(count, qda);
1016		for (x = 0; x < count; x++)
1017			qd_unlock(qda[x]);
1018	}
1019
1020out:
1021	gfs2_quota_unhold(ip);
1022}
1023
1024#define MAX_LINE 256
1025
1026static int print_message(struct gfs2_quota_data *qd, char *type)
1027{
1028	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
1029
1030	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
1031	       sdp->sd_fsname, type,
1032	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1033	       qd->qd_id);
1034
1035	return 0;
1036}
1037
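/**
 * gfs2_quota_check - check the locked quotas of an inode against their limits
 * @ip: The inode
 * @uid: The user ID to check
 * @gid: The group ID to check
 *
 * Returns: 0 if under quota, -EDQUOT if a hard limit has been exceeded.
 * Soft limit overruns only generate a warning.
 */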
1038int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039{
1040	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1041	struct gfs2_qadata *qa = ip->i_qadata;
1042	struct gfs2_quota_data *qd;
1043	s64 value;
1044	unsigned int x;
1045	int error = 0;
1046
1047	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1048		return 0;
1049
 1050	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 1051		return 0;
1052
1053	for (x = 0; x < qa->qa_qd_num; x++) {
1054		qd = qa->qa_qd[x];
1055
1056		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1057		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
1058			continue;
1059
1060		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1061		spin_lock(&qd_lru_lock);
1062		value += qd->qd_change;
1063		spin_unlock(&qd_lru_lock);
1064
1065		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1066			print_message(qd, "exceeded");
1067			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1068					   USRQUOTA : GRPQUOTA, qd->qd_id,
1069					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1070
1071			error = -EDQUOT;
1072			break;
1073		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1074			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
1075			   time_after_eq(jiffies, qd->qd_last_warn +
1076					 gfs2_tune_get(sdp,
1077						gt_quota_warn_period) * HZ)) {
1078			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1079					   USRQUOTA : GRPQUOTA, qd->qd_id,
1080					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1081			error = print_message(qd, "warning");
1082			qd->qd_last_warn = jiffies;
1083		}
1084	}
1085
1086	return error;
1087}
1088
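/**
 * gfs2_quota_change - record a block allocation change against held quotas
 * @ip: The inode the change applies to
 * @change: The change in the number of blocks
 * @uid: The user ID the change is accounted to
 * @gid: The group ID the change is accounted to
 */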
1089void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1090		       u32 uid, u32 gid)
1091{
1092	struct gfs2_qadata *qa = ip->i_qadata;
1093	struct gfs2_quota_data *qd;
1094	unsigned int x;
1095
1096	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1097		return;
1098	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1099		return;
1100
1101	for (x = 0; x < qa->qa_qd_num; x++) {
1102		qd = qa->qa_qd[x];
1103
1104		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1105		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
1106			do_qc(qd, change);
1107		}
1108	}
1109}
1110
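/**
 * gfs2_quota_sync - sync all locally accumulated quota changes
 * @sb: The superblock
 * @type: Unused; both user and group quotas are synced
 * @wait: Unused
 *
 * Repeatedly fishes out batches of changed quota data (at most
 * gt_quota_simul_sync at a time) and writes them into the quota file.
 */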
1111int gfs2_quota_sync(struct super_block *sb, int type, int wait)
1112{
1113	struct gfs2_sbd *sdp = sb->s_fs_info;
1114	struct gfs2_quota_data **qda;
1115	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1116	unsigned int num_qd;
1117	unsigned int x;
1118	int error = 0;
1119
1120	sdp->sd_quota_sync_gen++;
1121
1122	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1123	if (!qda)
1124		return -ENOMEM;
1125
1126	do {
1127		num_qd = 0;
1128
1129		for (;;) {
1130			error = qd_fish(sdp, qda + num_qd);
1131			if (error || !qda[num_qd])
1132				break;
1133			if (++num_qd == max_qd)
1134				break;
1135		}
1136
1137		if (num_qd) {
1138			if (!error)
1139				error = do_sync(num_qd, qda);
1140			if (!error)
1141				for (x = 0; x < num_qd; x++)
1142					qda[x]->qd_sync_gen =
1143						sdp->sd_quota_sync_gen;
1144
1145			for (x = 0; x < num_qd; x++)
1146				qd_unlock(qda[x]);
1147		}
1148	} while (!error && num_qd == max_qd);
1149
1150	kfree(qda);
1151
1152	return error;
1153}
1154
1155static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
1156{
1157	return gfs2_quota_sync(sb, type, 0);
1158}
1159
1160int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1161{
1162	struct gfs2_quota_data *qd;
1163	struct gfs2_holder q_gh;
1164	int error;
1165
1166	error = qd_get(sdp, user, id, &qd);
1167	if (error)
1168		return error;
1169
1170	error = do_glock(qd, FORCE, &q_gh);
1171	if (!error)
1172		gfs2_glock_dq_uninit(&q_gh);
1173
1174	qd_put(qd);
1175	return error;
1176}
1177
1178static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1179{
1180	const struct gfs2_quota_change *str = buf;
1181
1182	qc->qc_change = be64_to_cpu(str->qc_change);
1183	qc->qc_flags = be32_to_cpu(str->qc_flags);
1184	qc->qc_id = be32_to_cpu(str->qc_id);
1185}
1186
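/**
 * gfs2_quota_init - scan the per-node quota change file at mount time
 * @sdp: The GFS2 superblock
 *
 * Builds the quota slot bitmap and creates a quota data structure for
 * every slot that still carries an unsynced change.
 *
 * Returns: 0 on success or a negative error code
 */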
1187int gfs2_quota_init(struct gfs2_sbd *sdp)
1188{
1189	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1190	u64 size = i_size_read(sdp->sd_qc_inode);
1191	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1192	unsigned int x, slot = 0;
1193	unsigned int found = 0;
1194	u64 dblock;
1195	u32 extlen = 0;
1196	int error;
1197
1198	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1199		return -EIO;
1200
1201	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1202	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1203
1204	error = -ENOMEM;
1205
1206	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1207				       sizeof(unsigned char *), GFP_NOFS);
1208	if (!sdp->sd_quota_bitmap)
1209		return error;
1210
1211	for (x = 0; x < sdp->sd_quota_chunks; x++) {
1212		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1213		if (!sdp->sd_quota_bitmap[x])
1214			goto fail;
1215	}
1216
1217	for (x = 0; x < blocks; x++) {
1218		struct buffer_head *bh;
1219		unsigned int y;
1220
1221		if (!extlen) {
1222			int new = 0;
1223			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1224			if (error)
1225				goto fail;
1226		}
1227		error = -EIO;
1228		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1229		if (!bh)
1230			goto fail;
1231		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1232			brelse(bh);
1233			goto fail;
1234		}
1235
1236		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1237		     y++, slot++) {
1238			struct gfs2_quota_change_host qc;
1239			struct gfs2_quota_data *qd;
1240
1241			gfs2_quota_change_in(&qc, bh->b_data +
1242					  sizeof(struct gfs2_meta_header) +
1243					  y * sizeof(struct gfs2_quota_change));
1244			if (!qc.qc_change)
1245				continue;
1246
1247			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1248					 qc.qc_id, &qd);
1249			if (error) {
1250				brelse(bh);
1251				goto fail;
1252			}
1253
1254			set_bit(QDF_CHANGE, &qd->qd_flags);
1255			qd->qd_change = qc.qc_change;
1256			qd->qd_slot = slot;
1257			qd->qd_slot_count = 1;
1258
1259			spin_lock(&qd_lru_lock);
1260			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1261			list_add(&qd->qd_list, &sdp->sd_quota_list);
1262			atomic_inc(&sdp->sd_quota_count);
1263			spin_unlock(&qd_lru_lock);
1264
1265			found++;
1266		}
1267
1268		brelse(bh);
1269		dblock++;
1270		extlen--;
1271	}
1272
1273	if (found)
1274		fs_info(sdp, "found %u quota changes\n", found);
1275
1276	return 0;
1277
1278fail:
1279	gfs2_quota_cleanup(sdp);
1280	return error;
1281}
1282
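/* gfs2_quota_cleanup - tear down all cached quota data for this filesystem */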
1283void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1284{
1285	struct list_head *head = &sdp->sd_quota_list;
1286	struct gfs2_quota_data *qd;
1287	unsigned int x;
1288
1289	spin_lock(&qd_lru_lock);
1290	while (!list_empty(head)) {
1291		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1292
1293		if (atomic_read(&qd->qd_count) > 1 ||
1294		    (atomic_read(&qd->qd_count) &&
1295		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1296			list_move(&qd->qd_list, head);
1297			spin_unlock(&qd_lru_lock);
1298			schedule();
1299			spin_lock(&qd_lru_lock);
1300			continue;
1301		}
1302
1303		list_del(&qd->qd_list);
1304		/* Also remove if this qd exists in the reclaim list */
1305		if (!list_empty(&qd->qd_reclaim)) {
1306			list_del_init(&qd->qd_reclaim);
1307			atomic_dec(&qd_lru_count);
1308		}
1309		atomic_dec(&sdp->sd_quota_count);
1310		spin_unlock(&qd_lru_lock);
1311
1312		if (!atomic_read(&qd->qd_count)) {
1313			gfs2_assert_warn(sdp, !qd->qd_change);
1314			gfs2_assert_warn(sdp, !qd->qd_slot_count);
1315		} else
1316			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1317		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1318
1319		gfs2_glock_put(qd->qd_gl);
1320		kmem_cache_free(gfs2_quotad_cachep, qd);
1321
1322		spin_lock(&qd_lru_lock);
1323	}
1324	spin_unlock(&qd_lru_lock);
1325
1326	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1327
1328	if (sdp->sd_quota_bitmap) {
1329		for (x = 0; x < sdp->sd_quota_chunks; x++)
1330			kfree(sdp->sd_quota_bitmap[x]);
1331		kfree(sdp->sd_quota_bitmap);
1332	}
1333}
1334
1335static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1336{
1337	if (error == 0 || error == -EROFS)
1338		return;
1339	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1340		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1341}
1342
1343static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1344			       int (*fxn)(struct super_block *sb, int type),
1345			       unsigned long t, unsigned long *timeo,
1346			       unsigned int *new_timeo)
1347{
1348	if (t >= *timeo) {
1349		int error = fxn(sdp->sd_vfs, 0);
1350		quotad_error(sdp, msg, error);
1351		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1352	} else {
1353		*timeo -= t;
1354	}
1355}
1356
1357static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1358{
1359	struct gfs2_inode *ip;
1360
1361	while(1) {
1362		ip = NULL;
1363		spin_lock(&sdp->sd_trunc_lock);
1364		if (!list_empty(&sdp->sd_trunc_list)) {
1365			ip = list_entry(sdp->sd_trunc_list.next,
1366					struct gfs2_inode, i_trunc_list);
1367			list_del_init(&ip->i_trunc_list);
1368		}
1369		spin_unlock(&sdp->sd_trunc_lock);
1370		if (ip == NULL)
1371			return;
1372		gfs2_glock_finish_truncate(ip);
1373	}
1374}
1375
1376void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1377	if (!sdp->sd_statfs_force_sync) {
1378		sdp->sd_statfs_force_sync = 1;
1379		wake_up(&sdp->sd_quota_wait);
1380	}
1381}
1382
1383
1384/**
1385 * gfs2_quotad - Write cached quota changes into the quota file
 1386 * @data: Pointer to the GFS2 superblock
1387 *
1388 */
1389
1390int gfs2_quotad(void *data)
1391{
1392	struct gfs2_sbd *sdp = data;
1393	struct gfs2_tune *tune = &sdp->sd_tune;
1394	unsigned long statfs_timeo = 0;
1395	unsigned long quotad_timeo = 0;
1396	unsigned long t = 0;
1397	DEFINE_WAIT(wait);
1398	int empty;
1399
1400	while (!kthread_should_stop()) {
1401
1402		/* Update the master statfs file */
1403		if (sdp->sd_statfs_force_sync) {
1404			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1405			quotad_error(sdp, "statfs", error);
1406			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1407		}
1408		else
1409			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1410				   	   &statfs_timeo,
1411					   &tune->gt_statfs_quantum);
1412
1413		/* Update quota file */
1414		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
1415				   &quotad_timeo, &tune->gt_quota_quantum);
1416
1417		/* Check for & recover partially truncated inodes */
1418		quotad_check_trunc_list(sdp);
1419
1420		try_to_freeze();
1421
1422		t = min(quotad_timeo, statfs_timeo);
1423
1424		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1425		spin_lock(&sdp->sd_trunc_lock);
1426		empty = list_empty(&sdp->sd_trunc_list);
1427		spin_unlock(&sdp->sd_trunc_lock);
1428		if (empty && !sdp->sd_statfs_force_sync)
1429			t -= schedule_timeout(t);
1430		else
1431			t = 0;
1432		finish_wait(&sdp->sd_quota_wait, &wait);
1433	}
1434
1435	return 0;
1436}
1437
1438static int gfs2_quota_get_xstate(struct super_block *sb,
1439				 struct fs_quota_stat *fqs)
1440{
1441	struct gfs2_sbd *sdp = sb->s_fs_info;
1442
1443	memset(fqs, 0, sizeof(struct fs_quota_stat));
1444	fqs->qs_version = FS_QSTAT_VERSION;
1445
1446	switch (sdp->sd_args.ar_quota) {
1447	case GFS2_QUOTA_ON:
1448		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
1449		/*FALLTHRU*/
1450	case GFS2_QUOTA_ACCOUNT:
1451		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
1452		break;
1453	case GFS2_QUOTA_OFF:
1454		break;
1455	}
1456
1457	if (sdp->sd_quota_inode) {
1458		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1459		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1460	}
1461	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 1462	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1463	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1464	return 0;
1465}
1466
1467static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1468			  struct fs_disk_quota *fdq)
1469{
1470	struct gfs2_sbd *sdp = sb->s_fs_info;
1471	struct gfs2_quota_lvb *qlvb;
1472	struct gfs2_quota_data *qd;
1473	struct gfs2_holder q_gh;
1474	int error;
1475
1476	memset(fdq, 0, sizeof(struct fs_disk_quota));
1477
1478	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1479		return -ESRCH; /* Crazy XFS error code */
1480
1481	if (type == USRQUOTA)
1482		type = QUOTA_USER;
1483	else if (type == GRPQUOTA)
1484		type = QUOTA_GROUP;
1485	else
1486		return -EINVAL;
1487
1488	error = qd_get(sdp, type, id, &qd);
1489	if (error)
1490		return error;
1491	error = do_glock(qd, FORCE, &q_gh);
1492	if (error)
1493		goto out;
1494
1495	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1496	fdq->d_version = FS_DQUOT_VERSION;
1497	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1498	fdq->d_id = id;
1499	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1500	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1501	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1502
1503	gfs2_glock_dq_uninit(&q_gh);
1504out:
1505	qd_put(qd);
1506	return error;
1507}
1508
1509/* GFS2 only supports a subset of the XFS fields */
1510#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1511
1512static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1513			  struct fs_disk_quota *fdq)
1514{
1515	struct gfs2_sbd *sdp = sb->s_fs_info;
1516	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1517	struct gfs2_quota_data *qd;
1518	struct gfs2_holder q_gh, i_gh;
1519	unsigned int data_blocks, ind_blocks;
1520	unsigned int blocks = 0;
1521	int alloc_required;
1522	loff_t offset;
1523	int error;
1524
1525	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1526		return -ESRCH; /* Crazy XFS error code */
1527
1528	switch(type) {
1529	case USRQUOTA:
1530		type = QUOTA_USER;
1531		if (fdq->d_flags != FS_USER_QUOTA)
1532			return -EINVAL;
1533		break;
1534	case GRPQUOTA:
1535		type = QUOTA_GROUP;
1536		if (fdq->d_flags != FS_GROUP_QUOTA)
1537			return -EINVAL;
1538		break;
1539	default:
1540		return -EINVAL;
1541	}
1542
1543	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1544		return -EINVAL;
1545	if (fdq->d_id != id)
1546		return -EINVAL;
1547
1548	error = qd_get(sdp, type, id, &qd);
1549	if (error)
1550		return error;
1551
1552	mutex_lock(&ip->i_inode.i_mutex);
1553	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1554	if (error)
1555		goto out_put;
1556	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1557	if (error)
1558		goto out_q;
1559
 1560	/* Check for an existing entry; if none, then alloc new blocks */
1561	error = update_qd(sdp, qd);
1562	if (error)
1563		goto out_i;
1564
1565	/* If nothing has changed, this is a no-op */
1566	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1567	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1568		fdq->d_fieldmask ^= FS_DQ_BSOFT;
1569
1570	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1571	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1572		fdq->d_fieldmask ^= FS_DQ_BHARD;
1573
1574	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1575	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1576		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1577
1578	if (fdq->d_fieldmask == 0)
1579		goto out_i;
1580
1581	offset = qd2offset(qd);
1582	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1583	if (gfs2_is_stuffed(ip))
1584		alloc_required = 1;
1585	if (alloc_required) {
1586		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1587				       &data_blocks, &ind_blocks);
1588		blocks = 1 + data_blocks + ind_blocks;
1589		error = gfs2_inplace_reserve(ip, blocks);
1590		if (error)
1591			goto out_i;
1592		blocks += gfs2_rg_blocks(ip);
1593	}
1594
 1595	/* Some quotas span block boundaries and can update two blocks,
 1596	   so add an extra block to the transaction to handle such quotas */
1597	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1598	if (error)
1599		goto out_release;
1600
1601	/* Apply changes */
1602	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1603
1604	gfs2_trans_end(sdp);
1605out_release:
1606	if (alloc_required)
1607		gfs2_inplace_release(ip);
1608out_i:
1609	gfs2_glock_dq_uninit(&i_gh);
1610out_q:
1611	gfs2_glock_dq_uninit(&q_gh);
1612out_put:
1613	mutex_unlock(&ip->i_inode.i_mutex);
1614	qd_put(qd);
1615	return error;
1616}
1617
1618const struct quotactl_ops gfs2_quotactl_ops = {
1619	.quota_sync     = gfs2_quota_sync,
1620	.get_xstate     = gfs2_quota_get_xstate,
1621	.get_dqblk	= gfs2_get_dqblk,
1622	.set_dqblk	= gfs2_set_dqblk,
1623};