v6.2 — fs/gfs2/quota.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
   14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
   18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
  21 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
   26 * (with infinite nodes with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
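/*
 * In rough outline, the flow implemented below: gfs2_quota_change() is
 * called from the allocation and deallocation paths and funnels each delta
 * through do_qc(), which records it in the per-node quota_change file.
 * gfs2_quotad() (or an explicit gfs2_quota_sync()) later picks up the
 * tagged entries via qd_fish() and do_sync(), folding them into the global
 * quota file with gfs2_adjust_quota() and refreshing the per-ID LVBs so
 * other nodes see the new totals.
 */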
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
  85static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  86				 const struct kqid qid)
  87{
  88	unsigned int h;
  89
  90	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  91	h = jhash(&qid, sizeof(struct kqid), h);
  92
  93	return h & GFS2_QD_HASH_MASK;
  94}
  95
  96static inline void spin_lock_bucket(unsigned int hash)
  97{
  98        hlist_bl_lock(&qd_hash_table[hash]);
  99}
 100
 101static inline void spin_unlock_bucket(unsigned int hash)
 102{
 103        hlist_bl_unlock(&qd_hash_table[hash]);
 104}
 105
 106static void gfs2_qd_dealloc(struct rcu_head *rcu)
 107{
 108	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 109	kmem_cache_free(gfs2_quotad_cachep, qd);
 110}
 111
 112static void gfs2_qd_dispose(struct list_head *list)
 113{
 114	struct gfs2_quota_data *qd;
 115	struct gfs2_sbd *sdp;
 116
 117	while (!list_empty(list)) {
 118		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 119		sdp = qd->qd_gl->gl_name.ln_sbd;
 120
 121		list_del(&qd->qd_lru);
 122
 123		/* Free from the filesystem-specific list */
 124		spin_lock(&qd_lock);
 125		list_del(&qd->qd_list);
 126		spin_unlock(&qd_lock);
 127
 128		spin_lock_bucket(qd->qd_hash);
 129		hlist_bl_del_rcu(&qd->qd_hlist);
 130		spin_unlock_bucket(qd->qd_hash);
 131
 132		gfs2_assert_warn(sdp, !qd->qd_change);
 133		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 134		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 135
 136		gfs2_glock_put(qd->qd_gl);
 137		atomic_dec(&sdp->sd_quota_count);
 138
 139		/* Delete it from the common reclaim list */
 140		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 141	}
 142}
 143
 144
 145static enum lru_status gfs2_qd_isolate(struct list_head *item,
 146		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 147{
 148	struct list_head *dispose = arg;
 149	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 150
 151	if (!spin_trylock(&qd->qd_lockref.lock))
 152		return LRU_SKIP;
 153
 154	if (qd->qd_lockref.count == 0) {
 155		lockref_mark_dead(&qd->qd_lockref);
 156		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 157	}
 158
 159	spin_unlock(&qd->qd_lockref.lock);
 160	return LRU_REMOVED;
 161}
 162
 163static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 164					 struct shrink_control *sc)
 165{
 166	LIST_HEAD(dispose);
 167	unsigned long freed;
 168
 169	if (!(sc->gfp_mask & __GFP_FS))
 170		return SHRINK_STOP;
 171
 172	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 173				     gfs2_qd_isolate, &dispose);
 174
 175	gfs2_qd_dispose(&dispose);
 176
 177	return freed;
 178}
 179
 180static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 181					  struct shrink_control *sc)
 182{
 183	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 184}
 185
 186struct shrinker gfs2_qd_shrinker = {
 187	.count_objects = gfs2_qd_shrink_count,
 188	.scan_objects = gfs2_qd_shrink_scan,
 189	.seeks = DEFAULT_SEEKS,
 190	.flags = SHRINKER_NUMA_AWARE,
 191};
 192
 193
 194static u64 qd2index(struct gfs2_quota_data *qd)
 195{
 196	struct kqid qid = qd->qd_id;
 197	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 198		((qid.type == USRQUOTA) ? 0 : 1);
 199}
 200
 201static u64 qd2offset(struct gfs2_quota_data *qd)
 202{
 203	u64 offset;
 204
 205	offset = qd2index(qd);
 206	offset *= sizeof(struct gfs2_quota);
 207
 208	return offset;
 209}
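/*
 * Illustration: qd2index() interleaves user and group IDs, so USRQUOTA id N
 * maps to index 2*N and GRPQUOTA id N to 2*N + 1; qd2offset() then scales
 * by the on-disk record size.  For example, user id 1000 lands at offset
 * 2000 * sizeof(struct gfs2_quota) in the quota file.
 */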
 210
 211static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 212{
 213	struct gfs2_quota_data *qd;
 214	int error;
 215
 216	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 217	if (!qd)
 218		return NULL;
 219
 220	qd->qd_sbd = sdp;
 221	qd->qd_lockref.count = 1;
 222	spin_lock_init(&qd->qd_lockref.lock);
 223	qd->qd_id = qid;
 224	qd->qd_slot = -1;
 225	INIT_LIST_HEAD(&qd->qd_lru);
 226	qd->qd_hash = hash;
 227
 228	error = gfs2_glock_get(sdp, qd2index(qd),
 229			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 230	if (error)
 231		goto fail;
 232
 233	return qd;
 234
 235fail:
 236	kmem_cache_free(gfs2_quotad_cachep, qd);
 237	return NULL;
 238}
 239
 240static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 241						     const struct gfs2_sbd *sdp,
 242						     struct kqid qid)
 243{
 244	struct gfs2_quota_data *qd;
 245	struct hlist_bl_node *h;
 246
 247	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 248		if (!qid_eq(qd->qd_id, qid))
 249			continue;
 250		if (qd->qd_sbd != sdp)
 251			continue;
 252		if (lockref_get_not_dead(&qd->qd_lockref)) {
 253			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 254			return qd;
 255		}
 256	}
 257
 258	return NULL;
 259}
 260
 261
 262static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 263		  struct gfs2_quota_data **qdp)
 264{
 265	struct gfs2_quota_data *qd, *new_qd;
 266	unsigned int hash = gfs2_qd_hash(sdp, qid);
 267
 268	rcu_read_lock();
 269	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 270	rcu_read_unlock();
 271
 272	if (qd)
 273		return 0;
 274
 275	new_qd = qd_alloc(hash, sdp, qid);
 276	if (!new_qd)
 277		return -ENOMEM;
 278
 279	spin_lock(&qd_lock);
 280	spin_lock_bucket(hash);
 281	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 282	if (qd == NULL) {
 283		*qdp = new_qd;
 284		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 285		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 286		atomic_inc(&sdp->sd_quota_count);
 287	}
 288	spin_unlock_bucket(hash);
 289	spin_unlock(&qd_lock);
 290
 291	if (qd) {
 292		gfs2_glock_put(new_qd->qd_gl);
 293		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 294	}
 295
 296	return 0;
 297}
 298
 299
 300static void qd_hold(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 303	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 304	lockref_get(&qd->qd_lockref);
 305}
 306
 307static void qd_put(struct gfs2_quota_data *qd)
 308{
 309	if (lockref_put_or_lock(&qd->qd_lockref))
 310		return;
 311
 312	qd->qd_lockref.count = 0;
 313	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 314	spin_unlock(&qd->qd_lockref.lock);
 315
 316}
 317
 318static int slot_get(struct gfs2_quota_data *qd)
 319{
 320	struct gfs2_sbd *sdp = qd->qd_sbd;
 321	unsigned int bit;
 322	int error = 0;
 323
 324	spin_lock(&sdp->sd_bitmap_lock);
 325	if (qd->qd_slot_count != 0)
 326		goto out;
 327
 328	error = -ENOSPC;
 329	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 330	if (bit < sdp->sd_quota_slots) {
 331		set_bit(bit, sdp->sd_quota_bitmap);
 332		qd->qd_slot = bit;
 333		error = 0;
 334out:
 335		qd->qd_slot_count++;
 336	}
 337	spin_unlock(&sdp->sd_bitmap_lock);
 338
 339	return error;
 340}
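/*
 * Slot bookkeeping in brief: each gfs2_quota_data with a pending change
 * owns one slot in the per-node quota_change file, tracked by
 * sd_quota_bitmap.  bh_get() below converts a slot into a block/offset
 * pair (block = slot / sd_qc_per_block, offset = slot % sd_qc_per_block);
 * e.g. with an illustrative 100 slots per block, slot 250 would live at
 * offset 50 of block 2.  The real slots-per-block value depends on the
 * filesystem block size.
 */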
 341
 342static void slot_hold(struct gfs2_quota_data *qd)
 343{
 344	struct gfs2_sbd *sdp = qd->qd_sbd;
 345
 346	spin_lock(&sdp->sd_bitmap_lock);
 347	gfs2_assert(sdp, qd->qd_slot_count);
 348	qd->qd_slot_count++;
 349	spin_unlock(&sdp->sd_bitmap_lock);
 350}
 351
 352static void slot_put(struct gfs2_quota_data *qd)
 353{
 354	struct gfs2_sbd *sdp = qd->qd_sbd;
 355
 356	spin_lock(&sdp->sd_bitmap_lock);
 357	gfs2_assert(sdp, qd->qd_slot_count);
 358	if (!--qd->qd_slot_count) {
 359		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 360		qd->qd_slot = -1;
 361	}
 362	spin_unlock(&sdp->sd_bitmap_lock);
 363}
 364
 365static int bh_get(struct gfs2_quota_data *qd)
 366{
 367	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 368	struct inode *inode = sdp->sd_qc_inode;
 369	struct gfs2_inode *ip = GFS2_I(inode);
 370	unsigned int block, offset;
 371	struct buffer_head *bh;
 372	struct iomap iomap = { };
 373	int error;
 374
 375	mutex_lock(&sdp->sd_quota_mutex);
 376
 377	if (qd->qd_bh_count++) {
 378		mutex_unlock(&sdp->sd_quota_mutex);
 379		return 0;
 380	}
 381
 382	block = qd->qd_slot / sdp->sd_qc_per_block;
 383	offset = qd->qd_slot % sdp->sd_qc_per_block;
 384
 385	error = gfs2_iomap_get(inode,
 386			       (loff_t)block << inode->i_blkbits,
 387			       i_blocksize(inode), &iomap);
 388	if (error)
 389		goto fail;
 390	error = -ENOENT;
 391	if (iomap.type != IOMAP_MAPPED)
 392		goto fail;
 393
 394	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 395			       DIO_WAIT, 0, &bh);
 396	if (error)
 397		goto fail;
 398	error = -EIO;
 399	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 400		goto fail_brelse;
 401
 402	qd->qd_bh = bh;
 403	qd->qd_bh_qc = (struct gfs2_quota_change *)
 404		(bh->b_data + sizeof(struct gfs2_meta_header) +
 405		 offset * sizeof(struct gfs2_quota_change));
 406
 407	mutex_unlock(&sdp->sd_quota_mutex);
 408
 409	return 0;
 410
 411fail_brelse:
 412	brelse(bh);
 413fail:
 414	qd->qd_bh_count--;
 415	mutex_unlock(&sdp->sd_quota_mutex);
 416	return error;
 417}
 418
 419static void bh_put(struct gfs2_quota_data *qd)
 420{
 421	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 422
 423	mutex_lock(&sdp->sd_quota_mutex);
 424	gfs2_assert(sdp, qd->qd_bh_count);
 425	if (!--qd->qd_bh_count) {
 426		brelse(qd->qd_bh);
 427		qd->qd_bh = NULL;
 428		qd->qd_bh_qc = NULL;
 429	}
 430	mutex_unlock(&sdp->sd_quota_mutex);
 431}
 432
 433static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 434			 u64 *sync_gen)
 435{
 436	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 437	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 438	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 439		return 0;
 440
 441	if (!lockref_get_not_dead(&qd->qd_lockref))
 442		return 0;
 443
 444	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 445	set_bit(QDF_LOCKED, &qd->qd_flags);
 446	qd->qd_change_sync = qd->qd_change;
 447	slot_hold(qd);
 448	return 1;
 449}
 450
 451static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 452{
 453	struct gfs2_quota_data *qd = NULL, *iter;
 454	int error;
 455
 456	*qdp = NULL;
 457
 458	if (sb_rdonly(sdp->sd_vfs))
 459		return 0;
 460
 461	spin_lock(&qd_lock);
 462
 463	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
 464		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
 465			qd = iter;
 466			break;
 467		}
 468	}
 469
 470	spin_unlock(&qd_lock);
 471
 472	if (qd) {
 473		gfs2_assert_warn(sdp, qd->qd_change_sync);
 474		error = bh_get(qd);
 475		if (error) {
 476			clear_bit(QDF_LOCKED, &qd->qd_flags);
 477			slot_put(qd);
 478			qd_put(qd);
 479			return error;
 480		}
 481	}
 482
 483	*qdp = qd;
 484
 485	return 0;
 486}
 487
 488static void qd_unlock(struct gfs2_quota_data *qd)
 489{
 490	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
 491			 test_bit(QDF_LOCKED, &qd->qd_flags));
 492	clear_bit(QDF_LOCKED, &qd->qd_flags);
 493	bh_put(qd);
 494	slot_put(qd);
 495	qd_put(qd);
 496}
 497
 498static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 499		    struct gfs2_quota_data **qdp)
 500{
 501	int error;
 502
 503	error = qd_get(sdp, qid, qdp);
 504	if (error)
 505		return error;
 506
 507	error = slot_get(*qdp);
 508	if (error)
 509		goto fail;
 510
 511	error = bh_get(*qdp);
 512	if (error)
 513		goto fail_slot;
 514
 515	return 0;
 516
 517fail_slot:
 518	slot_put(*qdp);
 519fail:
 520	qd_put(*qdp);
 521	return error;
 522}
 523
 524static void qdsb_put(struct gfs2_quota_data *qd)
 525{
 526	bh_put(qd);
 527	slot_put(qd);
 528	qd_put(qd);
 529}
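/*
 * qdsb_get() and qdsb_put() stack three references in a fixed order: the
 * quota_data itself (qd_get), its change-file slot (slot_get) and the
 * buffer head covering that slot (bh_get), released in reverse on teardown.
 * gfs2_quota_hold() below relies on this pairing for each ID it tracks.
 */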
 530
 531/**
  532 * gfs2_qa_get - make sure we have a quota allocation data structure,
 533 *               if necessary
 534 * @ip: the inode for this reservation
 535 */
 536int gfs2_qa_get(struct gfs2_inode *ip)
 537{
 538	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 539	struct inode *inode = &ip->i_inode;
 540
 541	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 542		return 0;
 543
 544	spin_lock(&inode->i_lock);
 545	if (ip->i_qadata == NULL) {
 546		struct gfs2_qadata *tmp;
 547
 548		spin_unlock(&inode->i_lock);
 549		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 550		if (!tmp)
 551			return -ENOMEM;
 552
 553		spin_lock(&inode->i_lock);
 554		if (ip->i_qadata == NULL)
 555			ip->i_qadata = tmp;
 556		else
 557			kmem_cache_free(gfs2_qadata_cachep, tmp);
 558	}
 559	ip->i_qadata->qa_ref++;
 560	spin_unlock(&inode->i_lock);
 561	return 0;
 562}
 563
 564void gfs2_qa_put(struct gfs2_inode *ip)
 565{
 566	struct inode *inode = &ip->i_inode;
 567
 568	spin_lock(&inode->i_lock);
 569	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 570		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 571		ip->i_qadata = NULL;
 572	}
 573	spin_unlock(&inode->i_lock);
 574}
 575
 576int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 577{
 578	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 579	struct gfs2_quota_data **qd;
 580	int error;
 581
 582	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 583		return 0;
 584
 585	error = gfs2_qa_get(ip);
 586	if (error)
 587		return error;
 588
 589	qd = ip->i_qadata->qa_qd;
 590
 591	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 592	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 593		error = -EIO;
 594		goto out;
 595	}
 596
 597	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 598	if (error)
 599		goto out_unhold;
 600	ip->i_qadata->qa_qd_num++;
 601	qd++;
 602
 603	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 604	if (error)
 605		goto out_unhold;
 606	ip->i_qadata->qa_qd_num++;
 607	qd++;
 608
 609	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 610	    !uid_eq(uid, ip->i_inode.i_uid)) {
 611		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 612		if (error)
 613			goto out_unhold;
 614		ip->i_qadata->qa_qd_num++;
 615		qd++;
 616	}
 617
 618	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 619	    !gid_eq(gid, ip->i_inode.i_gid)) {
 620		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 621		if (error)
 622			goto out_unhold;
 623		ip->i_qadata->qa_qd_num++;
 624		qd++;
 625	}
 626
 627out_unhold:
 628	if (error)
 629		gfs2_quota_unhold(ip);
 630out:
 631	return error;
 632}
 633
 634void gfs2_quota_unhold(struct gfs2_inode *ip)
 635{
 636	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 637	u32 x;
 638
 639	if (ip->i_qadata == NULL)
 640		return;
 641
 642	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 643
 644	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 645		qdsb_put(ip->i_qadata->qa_qd[x]);
 646		ip->i_qadata->qa_qd[x] = NULL;
 647	}
 648	ip->i_qadata->qa_qd_num = 0;
 649	gfs2_qa_put(ip);
 650}
 651
 652static int sort_qd(const void *a, const void *b)
 653{
 654	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 655	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 656
 657	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 658		return -1;
 659	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 660		return 1;
 661	return 0;
 662}
 663
 664static void do_qc(struct gfs2_quota_data *qd, s64 change)
 665{
 666	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 667	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 668	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 669	s64 x;
 670
 671	mutex_lock(&sdp->sd_quota_mutex);
 672	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 673
 674	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 675		qc->qc_change = 0;
 676		qc->qc_flags = 0;
 677		if (qd->qd_id.type == USRQUOTA)
 678			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 679		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 680	}
 681
 682	x = be64_to_cpu(qc->qc_change) + change;
 683	qc->qc_change = cpu_to_be64(x);
 684
 685	spin_lock(&qd_lock);
 686	qd->qd_change = x;
 687	spin_unlock(&qd_lock);
 688
 689	if (!x) {
 690		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 691		clear_bit(QDF_CHANGE, &qd->qd_flags);
 692		qc->qc_flags = 0;
 693		qc->qc_id = 0;
 694		slot_put(qd);
 695		qd_put(qd);
 696	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 697		qd_hold(qd);
 698		slot_hold(qd);
 699	}
 700
 701	if (change < 0) /* Reset quiet flag if we freed some blocks */
 702		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 703	mutex_unlock(&sdp->sd_quota_mutex);
 704}
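/*
 * In short, do_qc() adds "change" to this ID's record in the per-node
 * quota_change file within the current transaction and mirrors the running
 * total in qd->qd_change.  The first nonzero total tags the entry with
 * QDF_CHANGE and pins an extra reference and slot; when the total returns
 * to zero, the tag, slot and reference are dropped again.
 */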
 705
 706static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 707				  unsigned off, void *buf, unsigned bytes)
 708{
 709	struct inode *inode = &ip->i_inode;
 710	struct gfs2_sbd *sdp = GFS2_SB(inode);
 711	struct address_space *mapping = inode->i_mapping;
 712	struct page *page;
 713	struct buffer_head *bh;
 714	void *kaddr;
 715	u64 blk;
 716	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 717	unsigned to_write = bytes, pg_off = off;
 718	int done = 0;
 719
 720	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 721	boff = off % bsize;
 722
 723	page = find_or_create_page(mapping, index, GFP_NOFS);
 724	if (!page)
 725		return -ENOMEM;
 726	if (!page_has_buffers(page))
 727		create_empty_buffers(page, bsize, 0);
 728
 729	bh = page_buffers(page);
 730	while (!done) {
 731		/* Find the beginning block within the page */
 732		if (pg_off >= ((bnum * bsize) + bsize)) {
 733			bh = bh->b_this_page;
 734			bnum++;
 735			blk++;
 736			continue;
 737		}
 738		if (!buffer_mapped(bh)) {
 739			gfs2_block_map(inode, blk, bh, 1);
 740			if (!buffer_mapped(bh))
 741				goto unlock_out;
 742			/* If it's a newly allocated disk block, zero it */
 743			if (buffer_new(bh))
 744				zero_user(page, bnum * bsize, bh->b_size);
 745		}
 746		if (PageUptodate(page))
 747			set_buffer_uptodate(bh);
 748		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 749			goto unlock_out;
 750		if (gfs2_is_jdata(ip))
 751			gfs2_trans_add_data(ip->i_gl, bh);
 752		else
 753			gfs2_ordered_add_inode(ip);
 754
 755		/* If we need to write to the next block as well */
 756		if (to_write > (bsize - boff)) {
 757			pg_off += (bsize - boff);
 758			to_write -= (bsize - boff);
 759			boff = pg_off % bsize;
 760			continue;
 761		}
 762		done = 1;
 763	}
 764
 765	/* Write to the page, now that we have setup the buffer(s) */
 766	kaddr = kmap_atomic(page);
 767	memcpy(kaddr + off, buf, bytes);
 768	flush_dcache_page(page);
 769	kunmap_atomic(kaddr);
 770	unlock_page(page);
 771	put_page(page);
 772
 773	return 0;
 774
 775unlock_out:
 776	unlock_page(page);
 777	put_page(page);
 778	return -EIO;
 779}
 780
 781static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 782				 loff_t loc)
 783{
 784	unsigned long pg_beg;
 785	unsigned pg_off, nbytes, overflow = 0;
 786	int pg_oflow = 0, error;
 787	void *ptr;
 788
 789	nbytes = sizeof(struct gfs2_quota);
 790
 791	pg_beg = loc >> PAGE_SHIFT;
 792	pg_off = offset_in_page(loc);
 793
 794	/* If the quota straddles a page boundary, split the write in two */
 795	if ((pg_off + nbytes) > PAGE_SIZE) {
 796		pg_oflow = 1;
 797		overflow = (pg_off + nbytes) - PAGE_SIZE;
 798	}
 799
 800	ptr = qp;
 801	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
 802				       nbytes - overflow);
 803	/* If there's an overflow, write the remaining bytes to the next page */
 804	if (!error && pg_oflow)
 805		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
 806					       ptr + nbytes - overflow,
 807					       overflow);
 808	return error;
 809}
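/*
 * Worked example (illustrative, assuming 4 KiB pages and an 88-byte
 * struct gfs2_quota): the record at index 46 starts at file offset 4048,
 * so pg_off + nbytes = 4136 exceeds PAGE_SIZE and the write is split into
 * 48 bytes at the end of the first page followed by the remaining 40 bytes
 * at the start of the next one.
 */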
 810
 811/**
 812 * gfs2_adjust_quota - adjust record of current block usage
 813 * @ip: The quota inode
 814 * @loc: Offset of the entry in the quota file
 815 * @change: The amount of usage change to record
 816 * @qd: The quota data
 817 * @fdq: The updated limits to record
 818 *
 819 * This function was mostly borrowed from gfs2_block_truncate_page which was
 820 * in turn mostly borrowed from ext3
 821 *
 822 * Returns: 0 or -ve on error
 823 */
 824
 825static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 826			     s64 change, struct gfs2_quota_data *qd,
 827			     struct qc_dqblk *fdq)
 828{
 829	struct inode *inode = &ip->i_inode;
 830	struct gfs2_sbd *sdp = GFS2_SB(inode);
 831	struct gfs2_quota q;
 832	int err;
 833	u64 size;
 834
 835	if (gfs2_is_stuffed(ip)) {
 836		err = gfs2_unstuff_dinode(ip);
 837		if (err)
 838			return err;
 839	}
 840
 841	memset(&q, 0, sizeof(struct gfs2_quota));
 842	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 843	if (err < 0)
 844		return err;
 845
 846	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 847	err = -EIO;
 848	be64_add_cpu(&q.qu_value, change);
 849	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 850		q.qu_value = 0; /* Never go negative on quota usage */
 851	qd->qd_qb.qb_value = q.qu_value;
 852	if (fdq) {
 853		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 854			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 855			qd->qd_qb.qb_warn = q.qu_warn;
 856		}
 857		if (fdq->d_fieldmask & QC_SPC_HARD) {
 858			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 859			qd->qd_qb.qb_limit = q.qu_limit;
 860		}
 861		if (fdq->d_fieldmask & QC_SPACE) {
 862			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 863			qd->qd_qb.qb_value = q.qu_value;
 864		}
 865	}
 866
 867	err = gfs2_write_disk_quota(ip, &q, loc);
 868	if (!err) {
 869		size = loc + sizeof(struct gfs2_quota);
 870		if (size > inode->i_size)
 871			i_size_write(inode, size);
 872		inode->i_mtime = inode->i_atime = current_time(inode);
 873		mark_inode_dirty(inode);
 874		set_bit(QDF_REFRESH, &qd->qd_flags);
 875	}
 876
 877	return err;
 878}
 879
 880static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 881{
 882	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 883	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 884	struct gfs2_alloc_parms ap = { .aflags = 0, };
 885	unsigned int data_blocks, ind_blocks;
 886	struct gfs2_holder *ghs, i_gh;
 887	unsigned int qx, x;
 888	struct gfs2_quota_data *qd;
 889	unsigned reserved;
 890	loff_t offset;
 891	unsigned int nalloc = 0, blocks;
 892	int error;
 893
 894	error = gfs2_qa_get(ip);
 895	if (error)
 896		return error;
 897
 898	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 899			      &data_blocks, &ind_blocks);
 900
 901	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 902	if (!ghs) {
 903		error = -ENOMEM;
 904		goto out;
 905	}
 906
 907	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 908	inode_lock(&ip->i_inode);
 909	for (qx = 0; qx < num_qd; qx++) {
 910		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 911					   GL_NOCACHE, &ghs[qx]);
 912		if (error)
 913			goto out_dq;
 914	}
 915
 916	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 917	if (error)
 918		goto out_dq;
 919
 920	for (x = 0; x < num_qd; x++) {
 921		offset = qd2offset(qda[x]);
 922		if (gfs2_write_alloc_required(ip, offset,
 923					      sizeof(struct gfs2_quota)))
 924			nalloc++;
 925	}
 926
 927	/* 
 928	 * 1 blk for unstuffing inode if stuffed. We add this extra
 929	 * block to the reservation unconditionally. If the inode
 930	 * doesn't need unstuffing, the block will be released to the 
 931	 * rgrp since it won't be allocated during the transaction
 932	 */
 933	/* +3 in the end for unstuffing block, inode size update block
 934	 * and another block in case quota straddles page boundary and 
 935	 * two blocks need to be updated instead of 1 */
 936	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 937
 938	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 939	ap.target = reserved;
 940	error = gfs2_inplace_reserve(ip, &ap);
 941	if (error)
 942		goto out_alloc;
 943
 944	if (nalloc)
 945		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 946
 947	error = gfs2_trans_begin(sdp, blocks, 0);
 948	if (error)
 949		goto out_ipres;
 950
 951	for (x = 0; x < num_qd; x++) {
 952		qd = qda[x];
 953		offset = qd2offset(qd);
 954		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 955		if (error)
 956			goto out_end_trans;
 957
 958		do_qc(qd, -qd->qd_change_sync);
 959		set_bit(QDF_REFRESH, &qd->qd_flags);
 960	}
 961
 962	error = 0;
 963
 964out_end_trans:
 965	gfs2_trans_end(sdp);
 966out_ipres:
 967	gfs2_inplace_release(ip);
 968out_alloc:
 969	gfs2_glock_dq_uninit(&i_gh);
 970out_dq:
 971	while (qx--)
 972		gfs2_glock_dq_uninit(&ghs[qx]);
 973	inode_unlock(&ip->i_inode);
 974	kfree(ghs);
 975	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
 976		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
 977out:
 978	gfs2_qa_put(ip);
 979	return error;
 980}
 981
 982static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 983{
 984	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 985	struct gfs2_quota q;
 986	struct gfs2_quota_lvb *qlvb;
 987	loff_t pos;
 988	int error;
 989
 990	memset(&q, 0, sizeof(struct gfs2_quota));
 991	pos = qd2offset(qd);
 992	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 993	if (error < 0)
 994		return error;
 995
 996	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 997	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 998	qlvb->__pad = 0;
 999	qlvb->qb_limit = q.qu_limit;
1000	qlvb->qb_warn = q.qu_warn;
1001	qlvb->qb_value = q.qu_value;
1002	qd->qd_qb = *qlvb;
1003
1004	return 0;
1005}
1006
1007static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1008		    struct gfs2_holder *q_gh)
1009{
1010	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1011	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1012	struct gfs2_holder i_gh;
1013	int error;
1014
1015restart:
1016	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1017	if (error)
1018		return error;
1019
1020	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1021		force_refresh = FORCE;
1022
1023	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1024
1025	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1026		gfs2_glock_dq_uninit(q_gh);
1027		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1028					   GL_NOCACHE, q_gh);
1029		if (error)
1030			return error;
1031
1032		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1033		if (error)
1034			goto fail;
1035
1036		error = update_qd(sdp, qd);
1037		if (error)
1038			goto fail_gunlock;
1039
1040		gfs2_glock_dq_uninit(&i_gh);
1041		gfs2_glock_dq_uninit(q_gh);
1042		force_refresh = 0;
1043		goto restart;
1044	}
1045
1046	return 0;
1047
1048fail_gunlock:
1049	gfs2_glock_dq_uninit(&i_gh);
1050fail:
1051	gfs2_glock_dq_uninit(q_gh);
1052	return error;
1053}
1054
1055int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1056{
1057	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1058	struct gfs2_quota_data *qd;
1059	u32 x;
1060	int error = 0;
1061
1062	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1063		return 0;
1064
1065	error = gfs2_quota_hold(ip, uid, gid);
1066	if (error)
1067		return error;
1068
1069	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1070	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1071
1072	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1073		qd = ip->i_qadata->qa_qd[x];
1074		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1075		if (error)
1076			break;
1077	}
1078
1079	if (!error)
1080		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1081	else {
1082		while (x--)
1083			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1084		gfs2_quota_unhold(ip);
1085	}
1086
1087	return error;
1088}
1089
1090static int need_sync(struct gfs2_quota_data *qd)
1091{
1092	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1093	struct gfs2_tune *gt = &sdp->sd_tune;
1094	s64 value;
1095	unsigned int num, den;
1096	int do_sync = 1;
1097
1098	if (!qd->qd_qb.qb_limit)
1099		return 0;
1100
1101	spin_lock(&qd_lock);
1102	value = qd->qd_change;
1103	spin_unlock(&qd_lock);
1104
1105	spin_lock(&gt->gt_spin);
1106	num = gt->gt_quota_scale_num;
1107	den = gt->gt_quota_scale_den;
1108	spin_unlock(&gt->gt_spin);
1109
1110	if (value < 0)
1111		do_sync = 0;
1112	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1113		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1114		do_sync = 0;
1115	else {
1116		value *= gfs2_jindex_size(sdp) * num;
1117		value = div_s64(value, den);
1118		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1119		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1120			do_sync = 0;
1121	}
1122
1123	return do_sync;
1124}
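/*
 * Example of the scaling above, with illustrative numbers: given a limit of
 * 1000 blocks, a cached qb_value of 900, a local change of +20 and 4
 * journals, the projected usage at quota_scale 1/1 is 900 + 20 * 4 = 980,
 * so no early sync is forced; at quota_scale 2/1 it is 900 + 20 * 4 * 2 =
 * 1060, which crosses the limit and makes need_sync() return 1.
 */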
1125
1126void gfs2_quota_unlock(struct gfs2_inode *ip)
1127{
1128	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1129	struct gfs2_quota_data *qda[4];
1130	unsigned int count = 0;
1131	u32 x;
1132	int found;
1133
1134	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1135		return;
1136
1137	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1138		struct gfs2_quota_data *qd;
1139		int sync;
1140
1141		qd = ip->i_qadata->qa_qd[x];
1142		sync = need_sync(qd);
1143
1144		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1145		if (!sync)
1146			continue;
1147
1148		spin_lock(&qd_lock);
1149		found = qd_check_sync(sdp, qd, NULL);
1150		spin_unlock(&qd_lock);
1151
1152		if (!found)
1153			continue;
1154
1155		gfs2_assert_warn(sdp, qd->qd_change_sync);
1156		if (bh_get(qd)) {
1157			clear_bit(QDF_LOCKED, &qd->qd_flags);
1158			slot_put(qd);
1159			qd_put(qd);
1160			continue;
1161		}
1162
1163		qda[count++] = qd;
1164	}
1165
1166	if (count) {
1167		do_sync(count, qda);
1168		for (x = 0; x < count; x++)
1169			qd_unlock(qda[x]);
1170	}
1171
1172	gfs2_quota_unhold(ip);
1173}
1174
1175#define MAX_LINE 256
1176
1177static int print_message(struct gfs2_quota_data *qd, char *type)
1178{
1179	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1180
1181	fs_info(sdp, "quota %s for %s %u\n",
1182		type,
1183		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1184		from_kqid(&init_user_ns, qd->qd_id));
1185
1186	return 0;
1187}
1188
1189/**
1190 * gfs2_quota_check - check if allocating new blocks will exceed quota
1191 * @ip:  The inode for which this check is being performed
1192 * @uid: The uid to check against
1193 * @gid: The gid to check against
1194 * @ap:  The allocation parameters. ap->target contains the requested
1195 *       blocks. ap->min_target, if set, contains the minimum blks
1196 *       requested.
1197 *
1198 * Returns: 0 on success.
1199 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1200 *                  quota must allow at least min_req blks for success and
1201 *                  ap->allowed is set to the number of blocks allowed
1202 *
1203 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1204 *                  of blocks available.
1205 */
1206int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1207		     struct gfs2_alloc_parms *ap)
1208{
1209	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1210	struct gfs2_quota_data *qd;
1211	s64 value, warn, limit;
1212	u32 x;
1213	int error = 0;
1214
1215	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1216	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1217		return 0;
1218
1219	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1220		qd = ip->i_qadata->qa_qd[x];
1221
1222		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1223		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1224			continue;
1225
1226		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1227		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1228		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1229		spin_lock(&qd_lock);
1230		value += qd->qd_change;
1231		spin_unlock(&qd_lock);
1232
1233		if (limit > 0 && (limit - value) < ap->allowed)
1234			ap->allowed = limit - value;
1235		/* If we can't meet the target */
1236		if (limit && limit < (value + (s64)ap->target)) {
1237			/* If no min_target specified or we don't meet
1238			 * min_target, return -EDQUOT */
1239			if (!ap->min_target || ap->min_target > ap->allowed) {
1240				if (!test_and_set_bit(QDF_QMSG_QUIET,
1241						      &qd->qd_flags)) {
1242					print_message(qd, "exceeded");
1243					quota_send_warning(qd->qd_id,
1244							   sdp->sd_vfs->s_dev,
1245							   QUOTA_NL_BHARDWARN);
1246				}
1247				error = -EDQUOT;
1248				break;
1249			}
1250		} else if (warn && warn < value &&
1251			   time_after_eq(jiffies, qd->qd_last_warn +
1252					 gfs2_tune_get(sdp, gt_quota_warn_period)
1253					 * HZ)) {
1254			quota_send_warning(qd->qd_id,
1255					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1256			error = print_message(qd, "warning");
1257			qd->qd_last_warn = jiffies;
1258		}
1259	}
1260	return error;
1261}
1262
1263void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1264		       kuid_t uid, kgid_t gid)
1265{
1266	struct gfs2_quota_data *qd;
1267	u32 x;
1268	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1269
1270	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
1271	    gfs2_assert_warn(sdp, change))
1272		return;
1273	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1274		return;
1275
1276	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1277				 ip->i_qadata->qa_ref > 0))
1278		return;
1279	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1280		qd = ip->i_qadata->qa_qd[x];
1281
1282		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1283		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1284			do_qc(qd, change);
1285		}
1286	}
1287}
1288
1289int gfs2_quota_sync(struct super_block *sb, int type)
1290{
1291	struct gfs2_sbd *sdp = sb->s_fs_info;
1292	struct gfs2_quota_data **qda;
1293	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1294	unsigned int num_qd;
1295	unsigned int x;
1296	int error = 0;
1297
1298	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1299	if (!qda)
1300		return -ENOMEM;
1301
1302	mutex_lock(&sdp->sd_quota_sync_mutex);
1303	sdp->sd_quota_sync_gen++;
1304
1305	do {
1306		num_qd = 0;
1307
1308		for (;;) {
1309			error = qd_fish(sdp, qda + num_qd);
1310			if (error || !qda[num_qd])
1311				break;
1312			if (++num_qd == max_qd)
1313				break;
1314		}
1315
1316		if (num_qd) {
1317			if (!error)
1318				error = do_sync(num_qd, qda);
1319			if (!error)
1320				for (x = 0; x < num_qd; x++)
1321					qda[x]->qd_sync_gen =
1322						sdp->sd_quota_sync_gen;
1323
1324			for (x = 0; x < num_qd; x++)
1325				qd_unlock(qda[x]);
1326		}
1327	} while (!error && num_qd == max_qd);
1328
1329	mutex_unlock(&sdp->sd_quota_sync_mutex);
1330	kfree(qda);
1331
1332	return error;
1333}
1334
1335int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1336{
1337	struct gfs2_quota_data *qd;
1338	struct gfs2_holder q_gh;
1339	int error;
1340
1341	error = qd_get(sdp, qid, &qd);
1342	if (error)
1343		return error;
1344
1345	error = do_glock(qd, FORCE, &q_gh);
1346	if (!error)
1347		gfs2_glock_dq_uninit(&q_gh);
1348
1349	qd_put(qd);
1350	return error;
1351}
1352
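/*
 * gfs2_quota_init() runs at mount time: it sizes the slot bitmap from the
 * per-node quota_change file, then scans that file and rebuilds an
 * in-memory gfs2_quota_data (with QDF_CHANGE set) for every nonzero entry
 * left over from before the last unmount or crash, so those pending deltas
 * can still be synced into the quota file.
 */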
1353int gfs2_quota_init(struct gfs2_sbd *sdp)
1354{
1355	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1356	u64 size = i_size_read(sdp->sd_qc_inode);
1357	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1358	unsigned int x, slot = 0;
1359	unsigned int found = 0;
1360	unsigned int hash;
1361	unsigned int bm_size;
1362	u64 dblock;
1363	u32 extlen = 0;
1364	int error;
1365
1366	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1367		return -EIO;
1368
1369	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1370	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1371	bm_size *= sizeof(unsigned long);
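	/*
	 * Sizing example (illustrative): the bitmap covers sd_quota_slots
	 * bits rounded up to whole unsigned longs, so e.g. 1000 slots on a
	 * 64-bit machine need DIV_ROUND_UP(1000, 64) = 16 longs, i.e. 128
	 * bytes.
	 */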
1372	error = -ENOMEM;
1373	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1374	if (sdp->sd_quota_bitmap == NULL)
1375		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1376						 __GFP_ZERO);
1377	if (!sdp->sd_quota_bitmap)
1378		return error;
1379
1380	for (x = 0; x < blocks; x++) {
1381		struct buffer_head *bh;
1382		const struct gfs2_quota_change *qc;
1383		unsigned int y;
1384
1385		if (!extlen) {
1386			extlen = 32;
1387			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1388			if (error)
1389				goto fail;
1390		}
1391		error = -EIO;
1392		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1393		if (!bh)
1394			goto fail;
1395		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1396			brelse(bh);
1397			goto fail;
1398		}
1399
1400		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1401		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1402		     y++, slot++) {
1403			struct gfs2_quota_data *qd;
1404			s64 qc_change = be64_to_cpu(qc->qc_change);
1405			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1406			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1407						USRQUOTA : GRPQUOTA;
1408			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1409						      be32_to_cpu(qc->qc_id));
1410			qc++;
1411			if (!qc_change)
1412				continue;
1413
1414			hash = gfs2_qd_hash(sdp, qc_id);
1415			qd = qd_alloc(hash, sdp, qc_id);
1416			if (qd == NULL) {
1417				brelse(bh);
1418				goto fail;
1419			}
1420
1421			set_bit(QDF_CHANGE, &qd->qd_flags);
1422			qd->qd_change = qc_change;
1423			qd->qd_slot = slot;
1424			qd->qd_slot_count = 1;
1425
1426			spin_lock(&qd_lock);
1427			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1428			list_add(&qd->qd_list, &sdp->sd_quota_list);
1429			atomic_inc(&sdp->sd_quota_count);
1430			spin_unlock(&qd_lock);
1431
1432			spin_lock_bucket(hash);
1433			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1434			spin_unlock_bucket(hash);
1435
1436			found++;
1437		}
1438
1439		brelse(bh);
1440		dblock++;
1441		extlen--;
1442	}
1443
1444	if (found)
1445		fs_info(sdp, "found %u quota changes\n", found);
1446
1447	return 0;
1448
1449fail:
1450	gfs2_quota_cleanup(sdp);
1451	return error;
1452}
1453
1454void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1455{
1456	struct list_head *head = &sdp->sd_quota_list;
1457	struct gfs2_quota_data *qd;
1458
1459	spin_lock(&qd_lock);
1460	while (!list_empty(head)) {
1461		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
1462
1463		list_del(&qd->qd_list);
1464
1465		/* Also remove if this qd exists in the reclaim list */
1466		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1467		atomic_dec(&sdp->sd_quota_count);
1468		spin_unlock(&qd_lock);
1469
1470		spin_lock_bucket(qd->qd_hash);
1471		hlist_bl_del_rcu(&qd->qd_hlist);
1472		spin_unlock_bucket(qd->qd_hash);
1473
1474		gfs2_assert_warn(sdp, !qd->qd_change);
1475		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1476		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1477
1478		gfs2_glock_put(qd->qd_gl);
1479		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1480
1481		spin_lock(&qd_lock);
1482	}
1483	spin_unlock(&qd_lock);
1484
1485	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1486
1487	kvfree(sdp->sd_quota_bitmap);
1488	sdp->sd_quota_bitmap = NULL;
1489}
1490
1491static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1492{
1493	if (error == 0 || error == -EROFS)
1494		return;
1495	if (!gfs2_withdrawn(sdp)) {
1496		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1497			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1498		wake_up(&sdp->sd_logd_waitq);
1499	}
1500}
1501
1502static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1503			       int (*fxn)(struct super_block *sb, int type),
1504			       unsigned long t, unsigned long *timeo,
1505			       unsigned int *new_timeo)
1506{
1507	if (t >= *timeo) {
1508		int error = fxn(sdp->sd_vfs, 0);
1509		quotad_error(sdp, msg, error);
1510		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1511	} else {
1512		*timeo -= t;
1513	}
1514}
1515
1516void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1517	if (!sdp->sd_statfs_force_sync) {
1518		sdp->sd_statfs_force_sync = 1;
1519		wake_up(&sdp->sd_quota_wait);
1520	}
1521}
1522
1523
1524/**
1525 * gfs2_quotad - Write cached quota changes into the quota file
1526 * @data: Pointer to GFS2 superblock
1527 *
1528 */
1529
1530int gfs2_quotad(void *data)
1531{
1532	struct gfs2_sbd *sdp = data;
1533	struct gfs2_tune *tune = &sdp->sd_tune;
1534	unsigned long statfs_timeo = 0;
1535	unsigned long quotad_timeo = 0;
1536	unsigned long t = 0;
1537	DEFINE_WAIT(wait);
1538
1539	while (!kthread_should_stop()) {
1540
1541		if (gfs2_withdrawn(sdp))
1542			goto bypass;
1543		/* Update the master statfs file */
1544		if (sdp->sd_statfs_force_sync) {
1545			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1546			quotad_error(sdp, "statfs", error);
1547			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1548		}
1549		else
1550			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1551				   	   &statfs_timeo,
1552					   &tune->gt_statfs_quantum);
1553
1554		/* Update quota file */
1555		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1556				   &quotad_timeo, &tune->gt_quota_quantum);
1557
1558		try_to_freeze();
1559
1560bypass:
1561		t = min(quotad_timeo, statfs_timeo);
1562
1563		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1564		if (!sdp->sd_statfs_force_sync)
1565			t -= schedule_timeout(t);
1566		else
1567			t = 0;
1568		finish_wait(&sdp->sd_quota_wait, &wait);
1569	}
1570
1571	return 0;
1572}
1573
1574static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1575{
1576	struct gfs2_sbd *sdp = sb->s_fs_info;
1577
1578	memset(state, 0, sizeof(*state));
1579
1580	switch (sdp->sd_args.ar_quota) {
1581	case GFS2_QUOTA_ON:
1582		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1583		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1584		fallthrough;
1585	case GFS2_QUOTA_ACCOUNT:
1586		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1587						  QCI_SYSFILE;
1588		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1589						  QCI_SYSFILE;
1590		break;
1591	case GFS2_QUOTA_OFF:
1592		break;
1593	}
1594	if (sdp->sd_quota_inode) {
1595		state->s_state[USRQUOTA].ino =
1596					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1597		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1598	}
1599	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1600	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1601	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1602	return 0;
1603}
1604
1605static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1606			  struct qc_dqblk *fdq)
1607{
1608	struct gfs2_sbd *sdp = sb->s_fs_info;
1609	struct gfs2_quota_lvb *qlvb;
1610	struct gfs2_quota_data *qd;
1611	struct gfs2_holder q_gh;
1612	int error;
1613
1614	memset(fdq, 0, sizeof(*fdq));
1615
1616	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1617		return -ESRCH; /* Crazy XFS error code */
1618
1619	if ((qid.type != USRQUOTA) &&
1620	    (qid.type != GRPQUOTA))
1621		return -EINVAL;
1622
1623	error = qd_get(sdp, qid, &qd);
1624	if (error)
1625		return error;
1626	error = do_glock(qd, FORCE, &q_gh);
1627	if (error)
1628		goto out;
1629
1630	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1631	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1632	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1633	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1634
1635	gfs2_glock_dq_uninit(&q_gh);
1636out:
1637	qd_put(qd);
1638	return error;
1639}
1640
1641/* GFS2 only supports a subset of the XFS fields */
1642#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1643
1644static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1645			  struct qc_dqblk *fdq)
1646{
1647	struct gfs2_sbd *sdp = sb->s_fs_info;
1648	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1649	struct gfs2_quota_data *qd;
1650	struct gfs2_holder q_gh, i_gh;
1651	unsigned int data_blocks, ind_blocks;
1652	unsigned int blocks = 0;
1653	int alloc_required;
1654	loff_t offset;
1655	int error;
1656
1657	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1658		return -ESRCH; /* Crazy XFS error code */
1659
1660	if ((qid.type != USRQUOTA) &&
1661	    (qid.type != GRPQUOTA))
1662		return -EINVAL;
1663
1664	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1665		return -EINVAL;
1666
1667	error = qd_get(sdp, qid, &qd);
1668	if (error)
1669		return error;
1670
1671	error = gfs2_qa_get(ip);
1672	if (error)
1673		goto out_put;
1674
1675	inode_lock(&ip->i_inode);
1676	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1677	if (error)
1678		goto out_unlockput;
1679	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1680	if (error)
1681		goto out_q;
1682
1683	/* Check for existing entry, if none then alloc new blocks */
1684	error = update_qd(sdp, qd);
1685	if (error)
1686		goto out_i;
1687
1688	/* If nothing has changed, this is a no-op */
1689	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1690	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1691		fdq->d_fieldmask ^= QC_SPC_SOFT;
1692
1693	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1694	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1695		fdq->d_fieldmask ^= QC_SPC_HARD;
1696
1697	if ((fdq->d_fieldmask & QC_SPACE) &&
1698	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1699		fdq->d_fieldmask ^= QC_SPACE;
1700
1701	if (fdq->d_fieldmask == 0)
1702		goto out_i;
1703
1704	offset = qd2offset(qd);
1705	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1706	if (gfs2_is_stuffed(ip))
1707		alloc_required = 1;
1708	if (alloc_required) {
1709		struct gfs2_alloc_parms ap = { .aflags = 0, };
1710		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1711				       &data_blocks, &ind_blocks);
1712		blocks = 1 + data_blocks + ind_blocks;
1713		ap.target = blocks;
1714		error = gfs2_inplace_reserve(ip, &ap);
1715		if (error)
1716			goto out_i;
1717		blocks += gfs2_rg_blocks(ip, blocks);
1718	}
1719
1720	/* Some quotas span block boundaries and can update two blocks,
1721	   adding an extra block to the transaction to handle such quotas */
1722	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1723	if (error)
1724		goto out_release;
1725
1726	/* Apply changes */
1727	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1728	if (!error)
1729		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1730
1731	gfs2_trans_end(sdp);
1732out_release:
1733	if (alloc_required)
1734		gfs2_inplace_release(ip);
1735out_i:
1736	gfs2_glock_dq_uninit(&i_gh);
1737out_q:
1738	gfs2_glock_dq_uninit(&q_gh);
1739out_unlockput:
1740	gfs2_qa_put(ip);
1741	inode_unlock(&ip->i_inode);
1742out_put:
1743	qd_put(qd);
1744	return error;
1745}
1746
1747const struct quotactl_ops gfs2_quotactl_ops = {
1748	.quota_sync     = gfs2_quota_sync,
1749	.get_state	= gfs2_quota_get_state,
1750	.get_dqblk	= gfs2_get_dqblk,
1751	.set_dqblk	= gfs2_set_dqblk,
1752};
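/*
 * These ops back the generic quotactl(2) path for GFS2; only the space
 * limits covered by GFS2_FIELDMASK can be queried or set (GFS2 reports
 * state and limits in the XFS-style qc_* structures, hence the -ESRCH
 * "crazy XFS error code" above), and inode limits are not supported.
 */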
1753
1754void __init gfs2_quota_hash_init(void)
1755{
1756	unsigned i;
1757
1758	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1759		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1760}
v3.15
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
  17 * multiple nodes to overrun their quota, but that overrun is controlable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
  21 * There are couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
  24 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
  29 * (with infinite node with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
  38
  39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43#include <linux/mm.h>
  44#include <linux/spinlock.h>
  45#include <linux/completion.h>
  46#include <linux/buffer_head.h>
  47#include <linux/sort.h>
  48#include <linux/fs.h>
  49#include <linux/bio.h>
  50#include <linux/gfs2_ondisk.h>
  51#include <linux/kthread.h>
  52#include <linux/freezer.h>
  53#include <linux/quota.h>
  54#include <linux/dqblk_xfs.h>
  55#include <linux/lockref.h>
  56#include <linux/list_lru.h>
  57#include <linux/rcupdate.h>
  58#include <linux/rculist_bl.h>
  59#include <linux/bit_spinlock.h>
  60#include <linux/jhash.h>
  61#include <linux/vmalloc.h>
  62
  63#include "gfs2.h"
  64#include "incore.h"
  65#include "bmap.h"
  66#include "glock.h"
  67#include "glops.h"
  68#include "log.h"
  69#include "meta_io.h"
  70#include "quota.h"
  71#include "rgrp.h"
  72#include "super.h"
  73#include "trans.h"
  74#include "inode.h"
  75#include "util.h"
  76
  77#define GFS2_QD_HASH_SHIFT      12
  78#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
  79#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  80
  81/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  82/*                     -> sd_bitmap_lock                              */
  83static DEFINE_SPINLOCK(qd_lock);
  84struct list_lru gfs2_qd_lru;
  85
  86static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  87
  88static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  89				 const struct kqid qid)
  90{
  91	unsigned int h;
  92
  93	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  94	h = jhash(&qid, sizeof(struct kqid), h);
  95
  96	return h & GFS2_QD_HASH_MASK;
  97}
  98
  99static inline void spin_lock_bucket(unsigned int hash)
 100{
 101        hlist_bl_lock(&qd_hash_table[hash]);
 102}
 103
 104static inline void spin_unlock_bucket(unsigned int hash)
 105{
 106        hlist_bl_unlock(&qd_hash_table[hash]);
 107}
 108
 109static void gfs2_qd_dealloc(struct rcu_head *rcu)
 110{
 111	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 112	kmem_cache_free(gfs2_quotad_cachep, qd);
 113}
 114
 115static void gfs2_qd_dispose(struct list_head *list)
 116{
 117	struct gfs2_quota_data *qd;
 118	struct gfs2_sbd *sdp;
 119
 120	while (!list_empty(list)) {
 121		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
 122		sdp = qd->qd_gl->gl_sbd;
 123
 124		list_del(&qd->qd_lru);
 125
 126		/* Free from the filesystem-specific list */
 127		spin_lock(&qd_lock);
 128		list_del(&qd->qd_list);
 129		spin_unlock(&qd_lock);
 130
 131		spin_lock_bucket(qd->qd_hash);
 132		hlist_bl_del_rcu(&qd->qd_hlist);
 133		spin_unlock_bucket(qd->qd_hash);
 134
 135		gfs2_assert_warn(sdp, !qd->qd_change);
 136		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 137		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 138
 139		gfs2_glock_put(qd->qd_gl);
 140		atomic_dec(&sdp->sd_quota_count);
 141
 142		/* Delete it from the common reclaim list */
 143		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 144	}
 145}
 146
 147
 148static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
 
 149{
 150	struct list_head *dispose = arg;
 151	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 152
 153	if (!spin_trylock(&qd->qd_lockref.lock))
 154		return LRU_SKIP;
 155
 156	if (qd->qd_lockref.count == 0) {
 157		lockref_mark_dead(&qd->qd_lockref);
 158		list_move(&qd->qd_lru, dispose);
 159	}
 160
 161	spin_unlock(&qd->qd_lockref.lock);
 162	return LRU_REMOVED;
 163}
 164
 165static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 166					 struct shrink_control *sc)
 167{
 168	LIST_HEAD(dispose);
 169	unsigned long freed;
 170
 171	if (!(sc->gfp_mask & __GFP_FS))
 172		return SHRINK_STOP;
 173
 174	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
 175				   &dispose, &sc->nr_to_scan);
 176
 177	gfs2_qd_dispose(&dispose);
 178
 179	return freed;
 180}
 181
 182static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 183					  struct shrink_control *sc)
 184{
 185	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
 186}
 187
 188struct shrinker gfs2_qd_shrinker = {
 189	.count_objects = gfs2_qd_shrink_count,
 190	.scan_objects = gfs2_qd_shrink_scan,
 191	.seeks = DEFAULT_SEEKS,
 192	.flags = SHRINKER_NUMA_AWARE,
 193};
 194
 195
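/*
 * Layout of the quota file: each ID owns one struct gfs2_quota record.
 * User IDs occupy the even-numbered indexes (2 * id) and group IDs the odd
 * ones (2 * id + 1), so the file offset of a record is simply
 * index * sizeof(struct gfs2_quota).
 */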
 196static u64 qd2index(struct gfs2_quota_data *qd)
 197{
 198	struct kqid qid = qd->qd_id;
 199	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 200		((qid.type == USRQUOTA) ? 0 : 1);
 201}
 202
 203static u64 qd2offset(struct gfs2_quota_data *qd)
 204{
 205	u64 offset;
 206
 207	offset = qd2index(qd);
 208	offset *= sizeof(struct gfs2_quota);
 209
 210	return offset;
 211}
 212
 213static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 214{
 215	struct gfs2_quota_data *qd;
 216	int error;
 217
 218	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 219	if (!qd)
 220		return NULL;
 221
 222	qd->qd_sbd = sdp;
 223	qd->qd_lockref.count = 1;
 224	spin_lock_init(&qd->qd_lockref.lock);
 225	qd->qd_id = qid;
 226	qd->qd_slot = -1;
 227	INIT_LIST_HEAD(&qd->qd_lru);
 228	qd->qd_hash = hash;
 229
 230	error = gfs2_glock_get(sdp, qd2index(qd),
 231			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 232	if (error)
 233		goto fail;
 234
 235	return qd;
 236
 237fail:
 238	kmem_cache_free(gfs2_quotad_cachep, qd);
 239	return NULL;
 240}
 241
 242static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 243						     const struct gfs2_sbd *sdp,
 244						     struct kqid qid)
 245{
 246	struct gfs2_quota_data *qd;
 247	struct hlist_bl_node *h;
 248
 249	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 250		if (!qid_eq(qd->qd_id, qid))
 251			continue;
 252		if (qd->qd_sbd != sdp)
 253			continue;
 254		if (lockref_get_not_dead(&qd->qd_lockref)) {
 255			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 256			return qd;
 257		}
 258	}
 259
 260	return NULL;
 261}
 262
 263
 264static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 265		  struct gfs2_quota_data **qdp)
 266{
 267	struct gfs2_quota_data *qd, *new_qd;
 268	unsigned int hash = gfs2_qd_hash(sdp, qid);
 269
 270	rcu_read_lock();
 271	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 272	rcu_read_unlock();
 273
 274	if (qd)
 275		return 0;
 276
 277	new_qd = qd_alloc(hash, sdp, qid);
 278	if (!new_qd)
 279		return -ENOMEM;
 280
 281	spin_lock(&qd_lock);
 282	spin_lock_bucket(hash);
 283	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 284	if (qd == NULL) {
 285		*qdp = new_qd;
 286		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 287		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 288		atomic_inc(&sdp->sd_quota_count);
 289	}
 290	spin_unlock_bucket(hash);
 291	spin_unlock(&qd_lock);
 292
 293	if (qd) {
 294		gfs2_glock_put(new_qd->qd_gl);
 295		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 296	}
 297
 298	return 0;
 299}
 300
 301
 302static void qd_hold(struct gfs2_quota_data *qd)
 303{
 304	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 305	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 306	lockref_get(&qd->qd_lockref);
 307}
 308
 309static void qd_put(struct gfs2_quota_data *qd)
 310{
 311	if (lockref_put_or_lock(&qd->qd_lockref))
 312		return;
 313
 314	qd->qd_lockref.count = 0;
 315	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 316	spin_unlock(&qd->qd_lockref.lock);
 317
 318}
 319
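/*
 * Slot handling: every pending quota change owns one slot in the per-node
 * quota change file.  sd_quota_bitmap tracks which slots are in use, and
 * slot_get()/slot_hold()/slot_put() maintain a per-qd reference count on
 * the allocated slot.
 */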
 320static int slot_get(struct gfs2_quota_data *qd)
 321{
 322	struct gfs2_sbd *sdp = qd->qd_sbd;
 323	unsigned int bit;
 324	int error = 0;
 325
 326	spin_lock(&sdp->sd_bitmap_lock);
 327	if (qd->qd_slot_count != 0)
 328		goto out;
 329
 330	error = -ENOSPC;
 331	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 332	if (bit < sdp->sd_quota_slots) {
 333		set_bit(bit, sdp->sd_quota_bitmap);
 334		qd->qd_slot = bit;
 335		error = 0;
 336out:
 337		qd->qd_slot_count++;
 338	}
 339	spin_unlock(&sdp->sd_bitmap_lock);
 340
 341	return error;
 342}
 343
 344static void slot_hold(struct gfs2_quota_data *qd)
 345{
 346	struct gfs2_sbd *sdp = qd->qd_sbd;
 347
 348	spin_lock(&sdp->sd_bitmap_lock);
 349	gfs2_assert(sdp, qd->qd_slot_count);
 350	qd->qd_slot_count++;
 351	spin_unlock(&sdp->sd_bitmap_lock);
 352}
 353
 354static void slot_put(struct gfs2_quota_data *qd)
 355{
 356	struct gfs2_sbd *sdp = qd->qd_sbd;
 357
 358	spin_lock(&sdp->sd_bitmap_lock);
 359	gfs2_assert(sdp, qd->qd_slot_count);
 360	if (!--qd->qd_slot_count) {
 361		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 362		qd->qd_slot = -1;
 363	}
 364	spin_unlock(&sdp->sd_bitmap_lock);
 365}
 366
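/*
 * Map the qd's slot to a block and offset within the quota change file,
 * read that block, and point qd_bh_qc at the matching struct
 * gfs2_quota_change.  The buffer is reference counted via qd_bh_count.
 */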
 367static int bh_get(struct gfs2_quota_data *qd)
 368{
 369	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 370	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 371	unsigned int block, offset;
 372	struct buffer_head *bh;
 373	int error;
 374	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 375
 376	mutex_lock(&sdp->sd_quota_mutex);
 377
 378	if (qd->qd_bh_count++) {
 379		mutex_unlock(&sdp->sd_quota_mutex);
 380		return 0;
 381	}
 382
 383	block = qd->qd_slot / sdp->sd_qc_per_block;
 384	offset = qd->qd_slot % sdp->sd_qc_per_block;
 385
 386	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 387	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 388	if (error)
 389		goto fail;
 390	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
 391	if (error)
 392		goto fail;
 393	error = -EIO;
 394	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 395		goto fail_brelse;
 396
 397	qd->qd_bh = bh;
 398	qd->qd_bh_qc = (struct gfs2_quota_change *)
 399		(bh->b_data + sizeof(struct gfs2_meta_header) +
 400		 offset * sizeof(struct gfs2_quota_change));
 401
 402	mutex_unlock(&sdp->sd_quota_mutex);
 403
 404	return 0;
 405
 406fail_brelse:
 407	brelse(bh);
 408fail:
 409	qd->qd_bh_count--;
 410	mutex_unlock(&sdp->sd_quota_mutex);
 411	return error;
 412}
 413
 414static void bh_put(struct gfs2_quota_data *qd)
 415{
 416	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 417
 418	mutex_lock(&sdp->sd_quota_mutex);
 419	gfs2_assert(sdp, qd->qd_bh_count);
 420	if (!--qd->qd_bh_count) {
 421		brelse(qd->qd_bh);
 422		qd->qd_bh = NULL;
 423		qd->qd_bh_qc = NULL;
 424	}
 425	mutex_unlock(&sdp->sd_quota_mutex);
 426}
 427
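/*
 * Called under qd_lock: decide whether this qd carries a pending change
 * that still needs writing to the quota file (and hasn't been synced in the
 * current sync generation).  If so, take a reference and set QDF_LOCKED so
 * that only one caller syncs it.
 */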
 428static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 429			 u64 *sync_gen)
 430{
 431	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 432	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 433	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 434		return 0;
 435
 436	if (!lockref_get_not_dead(&qd->qd_lockref))
 437		return 0;
 438
 439	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 440	set_bit(QDF_LOCKED, &qd->qd_flags);
 441	qd->qd_change_sync = qd->qd_change;
 442	slot_hold(qd);
 443	return 1;
 444}
 445
 446static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 447{
 448	struct gfs2_quota_data *qd = NULL;
 449	int error;
 450	int found = 0;
 451
 452	*qdp = NULL;
 453
 454	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 455		return 0;
 456
 457	spin_lock(&qd_lock);
 458
 459	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 460		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
 461		if (found)
 462			break;
 463	}
 464
 465	if (!found)
 466		qd = NULL;
 467
 468	spin_unlock(&qd_lock);
 469
 470	if (qd) {
 471		gfs2_assert_warn(sdp, qd->qd_change_sync);
 472		error = bh_get(qd);
 473		if (error) {
 474			clear_bit(QDF_LOCKED, &qd->qd_flags);
 475			slot_put(qd);
 476			qd_put(qd);
 477			return error;
 478		}
 479	}
 480
 481	*qdp = qd;
 482
 483	return 0;
 484}
 485
 486static void qd_unlock(struct gfs2_quota_data *qd)
 487{
 488	gfs2_assert_warn(qd->qd_gl->gl_sbd,
 489			 test_bit(QDF_LOCKED, &qd->qd_flags));
 490	clear_bit(QDF_LOCKED, &qd->qd_flags);
 491	bh_put(qd);
 492	slot_put(qd);
 493	qd_put(qd);
 494}
 495
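/*
 * qdsb_get()/qdsb_put() bundle the three references a holder needs: the qd
 * itself (qd_get), a slot in the quota change file (slot_get) and the
 * buffer backing that slot (bh_get).
 */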
 496static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 497		    struct gfs2_quota_data **qdp)
 498{
 499	int error;
 500
 501	error = qd_get(sdp, qid, qdp);
 502	if (error)
 503		return error;
 504
 505	error = slot_get(*qdp);
 506	if (error)
 507		goto fail;
 508
 509	error = bh_get(*qdp);
 510	if (error)
 511		goto fail_slot;
 512
 513	return 0;
 514
 515fail_slot:
 516	slot_put(*qdp);
 517fail:
 518	qd_put(*qdp);
 519	return error;
 520}
 521
 522static void qdsb_put(struct gfs2_quota_data *qd)
 523{
 524	bh_put(qd);
 525	slot_put(qd);
 526	qd_put(qd);
 527}
 528
 529int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 530{
 531	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 532	struct gfs2_quota_data **qd;
 533	int error;
 534
 535	if (ip->i_res == NULL) {
 536		error = gfs2_rs_alloc(ip);
 537		if (error)
 538			return error;
 539	}
 540
 541	qd = ip->i_res->rs_qa_qd;
 542
 543	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
 544	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 545		return -EIO;
 546
 547	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 548		return 0;
 549
 550	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 551	if (error)
 552		goto out;
 553	ip->i_res->rs_qa_qd_num++;
 554	qd++;
 555
 556	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 557	if (error)
 558		goto out;
 559	ip->i_res->rs_qa_qd_num++;
 560	qd++;
 561
 562	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 563	    !uid_eq(uid, ip->i_inode.i_uid)) {
 564		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 565		if (error)
 566			goto out;
 567		ip->i_res->rs_qa_qd_num++;
 568		qd++;
 569	}
 570
 571	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 572	    !gid_eq(gid, ip->i_inode.i_gid)) {
 573		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 574		if (error)
 575			goto out;
 576		ip->i_res->rs_qa_qd_num++;
 577		qd++;
 578	}
 579
 580out:
 581	if (error)
 582		gfs2_quota_unhold(ip);
 583	return error;
 584}
 585
 586void gfs2_quota_unhold(struct gfs2_inode *ip)
 587{
 588	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 589	unsigned int x;
 590
 591	if (ip->i_res == NULL)
 592		return;
 593	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 594
 595	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
 596		qdsb_put(ip->i_res->rs_qa_qd[x]);
 597		ip->i_res->rs_qa_qd[x] = NULL;
 598	}
 599	ip->i_res->rs_qa_qd_num = 0;
 600}
 601
 602static int sort_qd(const void *a, const void *b)
 603{
 604	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 605	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 606
 607	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 608		return -1;
 609	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 610		return 1;
 611	return 0;
 612}
 613
 614static void do_qc(struct gfs2_quota_data *qd, s64 change)
 615{
 616	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 617	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 618	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 619	s64 x;
 620
 621	mutex_lock(&sdp->sd_quota_mutex);
 622	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 623
 624	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 625		qc->qc_change = 0;
 626		qc->qc_flags = 0;
 627		if (qd->qd_id.type == USRQUOTA)
 628			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 629		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 630	}
 631
 632	x = be64_to_cpu(qc->qc_change) + change;
 633	qc->qc_change = cpu_to_be64(x);
 634
 635	spin_lock(&qd_lock);
 636	qd->qd_change = x;
 637	spin_unlock(&qd_lock);
 638
 639	if (!x) {
 640		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 641		clear_bit(QDF_CHANGE, &qd->qd_flags);
 642		qc->qc_flags = 0;
 643		qc->qc_id = 0;
 644		slot_put(qd);
 645		qd_put(qd);
 646	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 647		qd_hold(qd);
 648		slot_hold(qd);
 649	}
 650
 651	mutex_unlock(&sdp->sd_quota_mutex);
 652}
 653
 654/**
 655 * gfs2_adjust_quota - adjust record of current block usage
 656 * @ip: The quota inode
 657 * @loc: Offset of the entry in the quota file
 658 * @change: The amount of usage change to record
 659 * @qd: The quota data
 660 * @fdq: The updated limits to record
 661 *
 662 * This function was mostly borrowed from gfs2_block_truncate_page which was
 663 * in turn mostly borrowed from ext3
 664 *
 665 * Returns: 0 or -ve on error
 666 */
 667
 668static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 669			     s64 change, struct gfs2_quota_data *qd,
 670			     struct fs_disk_quota *fdq)
 671{
 672	struct inode *inode = &ip->i_inode;
 673	struct gfs2_sbd *sdp = GFS2_SB(inode);
 674	struct address_space *mapping = inode->i_mapping;
 675	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 676	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 677	unsigned blocksize, iblock, pos;
 678	struct buffer_head *bh;
 679	struct page *page;
 680	void *kaddr, *ptr;
 681	struct gfs2_quota q;
 682	int err, nbytes;
 683	u64 size;
 684
 685	if (gfs2_is_stuffed(ip)) {
 686		err = gfs2_unstuff_dinode(ip, NULL);
 687		if (err)
 688			return err;
 689	}
 690
 691	memset(&q, 0, sizeof(struct gfs2_quota));
 692	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 693	if (err < 0)
 694		return err;
 695
 696	err = -EIO;
 697	be64_add_cpu(&q.qu_value, change);
 698	qd->qd_qb.qb_value = q.qu_value;
 699	if (fdq) {
 700		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
 701			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
 702			qd->qd_qb.qb_warn = q.qu_warn;
 703		}
 704		if (fdq->d_fieldmask & FS_DQ_BHARD) {
 705			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
 706			qd->qd_qb.qb_limit = q.qu_limit;
 707		}
 708		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
 709			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
 710			qd->qd_qb.qb_value = q.qu_value;
 711		}
 712	}
 713
 714	/* Write the quota into the quota file on disk */
 715	ptr = &q;
 716	nbytes = sizeof(struct gfs2_quota);
 717get_a_page:
 718	page = find_or_create_page(mapping, index, GFP_NOFS);
 719	if (!page)
 720		return -ENOMEM;
 721
 722	blocksize = inode->i_sb->s_blocksize;
 723	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 724
 725	if (!page_has_buffers(page))
 726		create_empty_buffers(page, blocksize, 0);
 727
 728	bh = page_buffers(page);
 729	pos = blocksize;
 730	while (offset >= pos) {
 731		bh = bh->b_this_page;
 732		iblock++;
 733		pos += blocksize;
 734	}
 735
 736	if (!buffer_mapped(bh)) {
 737		gfs2_block_map(inode, iblock, bh, 1);
 738		if (!buffer_mapped(bh))
 739			goto unlock_out;
 740		/* If it's a newly allocated disk block for quota, zero it */
 741		if (buffer_new(bh))
 742			zero_user(page, pos - blocksize, bh->b_size);
 743	}
 744
 745	if (PageUptodate(page))
 746		set_buffer_uptodate(bh);
 747
 748	if (!buffer_uptodate(bh)) {
 749		ll_rw_block(READ | REQ_META, 1, &bh);
 750		wait_on_buffer(bh);
 751		if (!buffer_uptodate(bh))
 752			goto unlock_out;
 753	}
 754
 755	gfs2_trans_add_data(ip->i_gl, bh);
 756
 757	kaddr = kmap_atomic(page);
 758	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 759		nbytes = PAGE_CACHE_SIZE - offset;
 760	memcpy(kaddr + offset, ptr, nbytes);
 761	flush_dcache_page(page);
 762	kunmap_atomic(kaddr);
 763	unlock_page(page);
 764	page_cache_release(page);
 765
 766	/* If quota straddles page boundary, we need to update the rest of the
 767	 * quota at the beginning of the next page */
 768	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
 769		ptr = ptr + nbytes;
 770		nbytes = sizeof(struct gfs2_quota) - nbytes;
 771		offset = 0;
 772		index++;
 773		goto get_a_page;
 774	}
 775
 776	size = loc + sizeof(struct gfs2_quota);
 777	if (size > inode->i_size)
 778		i_size_write(inode, size);
 779	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 780	mark_inode_dirty(inode);
 781	return 0;
 782
 783unlock_out:
 784	unlock_page(page);
 785	page_cache_release(page);
 786	return err;
 787}
 788
 789static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 790{
 791	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 792	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 793	struct gfs2_alloc_parms ap = { .aflags = 0, };
 794	unsigned int data_blocks, ind_blocks;
 795	struct gfs2_holder *ghs, i_gh;
 796	unsigned int qx, x;
 797	struct gfs2_quota_data *qd;
 798	unsigned reserved;
 799	loff_t offset;
 800	unsigned int nalloc = 0, blocks;
 801	int error;
 802
 803	error = gfs2_rs_alloc(ip);
 804	if (error)
 805		return error;
 806
 807	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 808			      &data_blocks, &ind_blocks);
 809
 810	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 811	if (!ghs)
 812		return -ENOMEM;
 813
 814	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 815	mutex_lock(&ip->i_inode.i_mutex);
 816	for (qx = 0; qx < num_qd; qx++) {
 817		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 818					   GL_NOCACHE, &ghs[qx]);
 819		if (error)
 820			goto out;
 821	}
 822
 823	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 824	if (error)
 825		goto out;
 826
 827	for (x = 0; x < num_qd; x++) {
 828		offset = qd2offset(qda[x]);
 829		if (gfs2_write_alloc_required(ip, offset,
 830					      sizeof(struct gfs2_quota)))
 831			nalloc++;
 832	}
 833
  834	/*
  835	 * 1 block for unstuffing the inode if it is stuffed. We add this
  836	 * extra block to the reservation unconditionally. If the inode
  837	 * doesn't need unstuffing, the block will be released to the
  838	 * rgrp since it won't be allocated during the transaction.
  839	 */
  840	/* The +3 at the end covers the unstuffing block, the inode size
  841	 * update block, and one more block in case the quota straddles a
  842	 * page boundary and two blocks need updating instead of one. */
 843	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 844
 845	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 846	ap.target = reserved;
 847	error = gfs2_inplace_reserve(ip, &ap);
 848	if (error)
 849		goto out_alloc;
 850
 851	if (nalloc)
 852		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 853
 854	error = gfs2_trans_begin(sdp, blocks, 0);
 855	if (error)
 856		goto out_ipres;
 857
 858	for (x = 0; x < num_qd; x++) {
 859		qd = qda[x];
 860		offset = qd2offset(qd);
 861		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 862		if (error)
 863			goto out_end_trans;
 864
 865		do_qc(qd, -qd->qd_change_sync);
 866		set_bit(QDF_REFRESH, &qd->qd_flags);
 867	}
 868
 869	error = 0;
 870
 871out_end_trans:
 872	gfs2_trans_end(sdp);
 873out_ipres:
 874	gfs2_inplace_release(ip);
 875out_alloc:
 876	gfs2_glock_dq_uninit(&i_gh);
 877out:
 878	while (qx--)
 879		gfs2_glock_dq_uninit(&ghs[qx]);
 880	mutex_unlock(&ip->i_inode.i_mutex);
 881	kfree(ghs);
 882	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 883	return error;
 884}
 885
 886static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 887{
 888	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 889	struct gfs2_quota q;
 890	struct gfs2_quota_lvb *qlvb;
 891	loff_t pos;
 892	int error;
 893
 894	memset(&q, 0, sizeof(struct gfs2_quota));
 895	pos = qd2offset(qd);
 896	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 897	if (error < 0)
 898		return error;
 899
 900	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 901	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 902	qlvb->__pad = 0;
 903	qlvb->qb_limit = q.qu_limit;
 904	qlvb->qb_warn = q.qu_warn;
 905	qlvb->qb_value = q.qu_value;
 906	qd->qd_qb = *qlvb;
 907
 908	return 0;
 909}
 910
 911static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 912		    struct gfs2_holder *q_gh)
 913{
 914	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 915	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 916	struct gfs2_holder i_gh;
 917	int error;
 918
 919restart:
 920	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 921	if (error)
 922		return error;
 923
 924	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 925
 926	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 927		gfs2_glock_dq_uninit(q_gh);
 928		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 929					   GL_NOCACHE, q_gh);
 930		if (error)
 931			return error;
 932
 933		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 934		if (error)
 935			goto fail;
 936
 937		error = update_qd(sdp, qd);
 938		if (error)
 939			goto fail_gunlock;
 940
 941		gfs2_glock_dq_uninit(&i_gh);
 942		gfs2_glock_dq_uninit(q_gh);
 943		force_refresh = 0;
 944		goto restart;
 945	}
 946
 947	return 0;
 948
 949fail_gunlock:
 950	gfs2_glock_dq_uninit(&i_gh);
 951fail:
 952	gfs2_glock_dq_uninit(q_gh);
 953	return error;
 954}
 955
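/*
 * Typical use of the locking interface below (a sketch only; the real
 * callers elsewhere in GFS2 may differ in detail, and "nblocks" is just a
 * placeholder for however many blocks the transaction allocated):
 *
 *	error = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 *	if (error)
 *		return error;
 *	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
 *	if (!error) {
 *		... allocate the blocks, then record the change ...
 *		gfs2_quota_change(ip, nblocks, ip->i_inode.i_uid,
 *				  ip->i_inode.i_gid);
 *	}
 *	gfs2_quota_unlock(ip);
 */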
 956int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 957{
 958	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 959	struct gfs2_quota_data *qd;
 960	unsigned int x;
 961	int error = 0;
 962
 963	error = gfs2_quota_hold(ip, uid, gid);
 964	if (error)
 965		return error;
 966
 967	if (capable(CAP_SYS_RESOURCE) ||
 968	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 969		return 0;
 970
 971	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
 972	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 973
 974	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
 975		int force = NO_FORCE;
 976		qd = ip->i_res->rs_qa_qd[x];
 977		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 978			force = FORCE;
 979		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
 980		if (error)
 981			break;
 982	}
 983
 984	if (!error)
 985		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 986	else {
 987		while (x--)
 988			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
 989		gfs2_quota_unhold(ip);
 990	}
 991
 992	return error;
 993}
 994
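/*
 * Estimate whether the locally cached change, scaled up by the number of
 * journals and the quota_scale tunable, could push this ID over its limit.
 * If it could, the change should be synced to the quota file now rather
 * than waiting for the periodic sync.
 */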
 995static int need_sync(struct gfs2_quota_data *qd)
 996{
 997	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 998	struct gfs2_tune *gt = &sdp->sd_tune;
 999	s64 value;
1000	unsigned int num, den;
1001	int do_sync = 1;
1002
1003	if (!qd->qd_qb.qb_limit)
1004		return 0;
1005
1006	spin_lock(&qd_lock);
1007	value = qd->qd_change;
1008	spin_unlock(&qd_lock);
1009
1010	spin_lock(&gt->gt_spin);
1011	num = gt->gt_quota_scale_num;
1012	den = gt->gt_quota_scale_den;
1013	spin_unlock(&gt->gt_spin);
1014
1015	if (value < 0)
1016		do_sync = 0;
1017	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1018		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1019		do_sync = 0;
1020	else {
1021		value *= gfs2_jindex_size(sdp) * num;
1022		value = div_s64(value, den);
1023		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1024		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1025			do_sync = 0;
1026	}
1027
1028	return do_sync;
1029}
1030
1031void gfs2_quota_unlock(struct gfs2_inode *ip)
1032{
1033	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1034	struct gfs2_quota_data *qda[4];
1035	unsigned int count = 0;
1036	unsigned int x;
1037	int found;
1038
1039	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1040		goto out;
1041
1042	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1043		struct gfs2_quota_data *qd;
1044		int sync;
1045
1046		qd = ip->i_res->rs_qa_qd[x];
1047		sync = need_sync(qd);
1048
1049		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
1050		if (!sync)
1051			continue;
1052
1053		spin_lock(&qd_lock);
1054		found = qd_check_sync(sdp, qd, NULL);
1055		spin_unlock(&qd_lock);
1056
1057		if (!found)
1058			continue;
1059
1060		gfs2_assert_warn(sdp, qd->qd_change_sync);
1061		if (bh_get(qd)) {
1062			clear_bit(QDF_LOCKED, &qd->qd_flags);
1063			slot_put(qd);
1064			qd_put(qd);
1065			continue;
1066		}
1067
1068		qda[count++] = qd;
1069	}
1070
1071	if (count) {
1072		do_sync(count, qda);
1073		for (x = 0; x < count; x++)
1074			qd_unlock(qda[x]);
1075	}
1076
1077out:
1078	gfs2_quota_unhold(ip);
1079}
1080
1081#define MAX_LINE 256
1082
1083static int print_message(struct gfs2_quota_data *qd, char *type)
1084{
1085	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
1086
1087	fs_info(sdp, "quota %s for %s %u\n",
1088		type,
1089		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1090		from_kqid(&init_user_ns, qd->qd_id));
1091
1092	return 0;
1093}
1094
1095int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1096{
1097	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1098	struct gfs2_quota_data *qd;
1099	s64 value;
1100	unsigned int x;
1101	int error = 0;
1102
1103	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1104		return 0;
1105
 1106	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 1107		return 0;
1108
1109	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1110		qd = ip->i_res->rs_qa_qd[x];
1111
1112		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1113		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1114			continue;
1115
1116		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1117		spin_lock(&qd_lock);
1118		value += qd->qd_change;
1119		spin_unlock(&qd_lock);
1120
1121		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1122			print_message(qd, "exceeded");
1123			quota_send_warning(qd->qd_id,
1124					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1125
1126			error = -EDQUOT;
1127			break;
1128		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1129			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
1130			   time_after_eq(jiffies, qd->qd_last_warn +
1131					 gfs2_tune_get(sdp,
1132						gt_quota_warn_period) * HZ)) {
1133			quota_send_warning(qd->qd_id,
1134					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1135			error = print_message(qd, "warning");
1136			qd->qd_last_warn = jiffies;
1137		}
1138	}
1139
1140	return error;
1141}
1142
1143void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1144		       kuid_t uid, kgid_t gid)
1145{
1146	struct gfs2_quota_data *qd;
1147	unsigned int x;
1148
1149	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1150		return;
1151	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1152		return;
1153
1154	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
1155		qd = ip->i_res->rs_qa_qd[x];
1156
1157		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1158		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1159			do_qc(qd, change);
1160		}
1161	}
1162}
1163
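/*
 * Write all pending quota changes for this filesystem to the quota file.
 * Candidate qds are fished out of sd_quota_list in batches of up to max_qd,
 * written out by do_sync(), and then stamped with the current sync
 * generation so they are not picked up again during this pass.
 */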
1164int gfs2_quota_sync(struct super_block *sb, int type)
1165{
1166	struct gfs2_sbd *sdp = sb->s_fs_info;
1167	struct gfs2_quota_data **qda;
1168	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
1169	unsigned int num_qd;
1170	unsigned int x;
1171	int error = 0;
1172
1173	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1174	if (!qda)
1175		return -ENOMEM;
1176
1177	mutex_lock(&sdp->sd_quota_sync_mutex);
1178	sdp->sd_quota_sync_gen++;
1179
1180	do {
1181		num_qd = 0;
1182
1183		for (;;) {
1184			error = qd_fish(sdp, qda + num_qd);
1185			if (error || !qda[num_qd])
1186				break;
1187			if (++num_qd == max_qd)
1188				break;
1189		}
1190
1191		if (num_qd) {
1192			if (!error)
1193				error = do_sync(num_qd, qda);
1194			if (!error)
1195				for (x = 0; x < num_qd; x++)
1196					qda[x]->qd_sync_gen =
1197						sdp->sd_quota_sync_gen;
1198
1199			for (x = 0; x < num_qd; x++)
1200				qd_unlock(qda[x]);
1201		}
1202	} while (!error && num_qd == max_qd);
1203
1204	mutex_unlock(&sdp->sd_quota_sync_mutex);
1205	kfree(qda);
1206
1207	return error;
1208}
1209
1210int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1211{
1212	struct gfs2_quota_data *qd;
1213	struct gfs2_holder q_gh;
1214	int error;
1215
1216	error = qd_get(sdp, qid, &qd);
1217	if (error)
1218		return error;
1219
1220	error = do_glock(qd, FORCE, &q_gh);
1221	if (!error)
1222		gfs2_glock_dq_uninit(&q_gh);
1223
1224	qd_put(qd);
1225	return error;
1226}
1227
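/*
 * Called at mount time: scan the quota change file and, for every slot
 * holding a non-zero change left over from before the last unmount or
 * crash, recreate the in-memory gfs2_quota_data and mark its slot as
 * allocated, so the change will eventually be synced to the quota file.
 */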
1228int gfs2_quota_init(struct gfs2_sbd *sdp)
1229{
1230	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1231	u64 size = i_size_read(sdp->sd_qc_inode);
1232	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1233	unsigned int x, slot = 0;
1234	unsigned int found = 0;
1235	unsigned int hash;
1236	unsigned int bm_size;
1237	u64 dblock;
1238	u32 extlen = 0;
1239	int error;
1240
1241	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1242		return -EIO;
1243
1244	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1245	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1246	bm_size *= sizeof(unsigned long);
1247	error = -ENOMEM;
1248	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1249	if (sdp->sd_quota_bitmap == NULL)
1250		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1251						 __GFP_ZERO, PAGE_KERNEL);
1252	if (!sdp->sd_quota_bitmap)
1253		return error;
1254
1255	for (x = 0; x < blocks; x++) {
1256		struct buffer_head *bh;
1257		const struct gfs2_quota_change *qc;
1258		unsigned int y;
1259
1260		if (!extlen) {
1261			int new = 0;
1262			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1263			if (error)
1264				goto fail;
1265		}
1266		error = -EIO;
1267		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1268		if (!bh)
1269			goto fail;
1270		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1271			brelse(bh);
1272			goto fail;
1273		}
1274
1275		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1276		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1277		     y++, slot++) {
1278			struct gfs2_quota_data *qd;
1279			s64 qc_change = be64_to_cpu(qc->qc_change);
1280			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1281			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1282						USRQUOTA : GRPQUOTA;
1283			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1284						      be32_to_cpu(qc->qc_id));
1285			qc++;
1286			if (!qc_change)
1287				continue;
1288
1289			hash = gfs2_qd_hash(sdp, qc_id);
1290			qd = qd_alloc(hash, sdp, qc_id);
1291			if (qd == NULL) {
1292				brelse(bh);
1293				goto fail;
1294			}
1295
1296			set_bit(QDF_CHANGE, &qd->qd_flags);
1297			qd->qd_change = qc_change;
1298			qd->qd_slot = slot;
1299			qd->qd_slot_count = 1;
1300
1301			spin_lock(&qd_lock);
1302			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1303			list_add(&qd->qd_list, &sdp->sd_quota_list);
1304			atomic_inc(&sdp->sd_quota_count);
1305			spin_unlock(&qd_lock);
1306
1307			spin_lock_bucket(hash);
1308			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1309			spin_unlock_bucket(hash);
1310
1311			found++;
1312		}
1313
1314		brelse(bh);
1315		dblock++;
1316		extlen--;
1317	}
1318
1319	if (found)
1320		fs_info(sdp, "found %u quota changes\n", found);
1321
1322	return 0;
1323
1324fail:
1325	gfs2_quota_cleanup(sdp);
1326	return error;
1327}
1328
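/*
 * Tear down all cached quota data for this filesystem at unmount time:
 * unhash each qd, drop its glock reference, free it via RCU, and finally
 * release the slot bitmap.
 */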
1329void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1330{
1331	struct list_head *head = &sdp->sd_quota_list;
1332	struct gfs2_quota_data *qd;
1333
1334	spin_lock(&qd_lock);
1335	while (!list_empty(head)) {
1336		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1337
1338		list_del(&qd->qd_list);
1339
1340		/* Also remove if this qd exists in the reclaim list */
1341		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1342		atomic_dec(&sdp->sd_quota_count);
1343		spin_unlock(&qd_lock);
1344
1345		spin_lock_bucket(qd->qd_hash);
1346		hlist_bl_del_rcu(&qd->qd_hlist);
1347		spin_unlock_bucket(qd->qd_hash);
1348
1349		gfs2_assert_warn(sdp, !qd->qd_change);
1350		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1351		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1352
1353		gfs2_glock_put(qd->qd_gl);
1354		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1355
1356		spin_lock(&qd_lock);
1357	}
1358	spin_unlock(&qd_lock);
1359
1360	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1361
1362	if (sdp->sd_quota_bitmap) {
1363		if (is_vmalloc_addr(sdp->sd_quota_bitmap))
1364			vfree(sdp->sd_quota_bitmap);
1365		else
1366			kfree(sdp->sd_quota_bitmap);
1367		sdp->sd_quota_bitmap = NULL;
1368	}
1369}
1370
1371static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1372{
1373	if (error == 0 || error == -EROFS)
1374		return;
1375	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1376		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1377}
1378
1379static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1380			       int (*fxn)(struct super_block *sb, int type),
1381			       unsigned long t, unsigned long *timeo,
1382			       unsigned int *new_timeo)
1383{
1384	if (t >= *timeo) {
1385		int error = fxn(sdp->sd_vfs, 0);
1386		quotad_error(sdp, msg, error);
1387		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1388	} else {
1389		*timeo -= t;
1390	}
1391}
1392
1393static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1394{
1395	struct gfs2_inode *ip;
1396
1397	while(1) {
1398		ip = NULL;
1399		spin_lock(&sdp->sd_trunc_lock);
1400		if (!list_empty(&sdp->sd_trunc_list)) {
1401			ip = list_entry(sdp->sd_trunc_list.next,
1402					struct gfs2_inode, i_trunc_list);
1403			list_del_init(&ip->i_trunc_list);
1404		}
1405		spin_unlock(&sdp->sd_trunc_lock);
1406		if (ip == NULL)
1407			return;
1408		gfs2_glock_finish_truncate(ip);
1409	}
1410}
1411
1412void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1413	if (!sdp->sd_statfs_force_sync) {
1414		sdp->sd_statfs_force_sync = 1;
1415		wake_up(&sdp->sd_quota_wait);
1416	}
1417}
1418
1419
1420/**
1421 * gfs2_quotad - Write cached quota changes into the quota file
1422 * @sdp: Pointer to GFS2 superblock
1423 *
1424 */
1425
1426int gfs2_quotad(void *data)
1427{
1428	struct gfs2_sbd *sdp = data;
1429	struct gfs2_tune *tune = &sdp->sd_tune;
1430	unsigned long statfs_timeo = 0;
1431	unsigned long quotad_timeo = 0;
1432	unsigned long t = 0;
1433	DEFINE_WAIT(wait);
1434	int empty;
1435
1436	while (!kthread_should_stop()) {
1437
1438		/* Update the master statfs file */
1439		if (sdp->sd_statfs_force_sync) {
1440			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1441			quotad_error(sdp, "statfs", error);
1442			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1443		}
1444		else
1445			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1446				   	   &statfs_timeo,
1447					   &tune->gt_statfs_quantum);
1448
1449		/* Update quota file */
1450		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1451				   &quotad_timeo, &tune->gt_quota_quantum);
1452
1453		/* Check for & recover partially truncated inodes */
1454		quotad_check_trunc_list(sdp);
1455
1456		try_to_freeze();
1457
1458		t = min(quotad_timeo, statfs_timeo);
1459
1460		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1461		spin_lock(&sdp->sd_trunc_lock);
1462		empty = list_empty(&sdp->sd_trunc_list);
1463		spin_unlock(&sdp->sd_trunc_lock);
1464		if (empty && !sdp->sd_statfs_force_sync)
1465			t -= schedule_timeout(t);
1466		else
1467			t = 0;
1468		finish_wait(&sdp->sd_quota_wait, &wait);
1469	}
1470
1471	return 0;
1472}
1473
1474static int gfs2_quota_get_xstate(struct super_block *sb,
1475				 struct fs_quota_stat *fqs)
1476{
1477	struct gfs2_sbd *sdp = sb->s_fs_info;
1478
1479	memset(fqs, 0, sizeof(struct fs_quota_stat));
1480	fqs->qs_version = FS_QSTAT_VERSION;
1481
1482	switch (sdp->sd_args.ar_quota) {
1483	case GFS2_QUOTA_ON:
1484		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
1485		/*FALLTHRU*/
1486	case GFS2_QUOTA_ACCOUNT:
1487		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
1488		break;
1489	case GFS2_QUOTA_OFF:
1490		break;
1491	}
1492
1493	if (sdp->sd_quota_inode) {
1494		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1495		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1496	}
1497	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 1498	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1499	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
1500	return 0;
1501}
1502
1503static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1504			  struct fs_disk_quota *fdq)
1505{
1506	struct gfs2_sbd *sdp = sb->s_fs_info;
1507	struct gfs2_quota_lvb *qlvb;
1508	struct gfs2_quota_data *qd;
1509	struct gfs2_holder q_gh;
1510	int error;
1511
1512	memset(fdq, 0, sizeof(struct fs_disk_quota));
1513
1514	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1515		return -ESRCH; /* Crazy XFS error code */
1516
1517	if ((qid.type != USRQUOTA) &&
1518	    (qid.type != GRPQUOTA))
1519		return -EINVAL;
1520
1521	error = qd_get(sdp, qid, &qd);
1522	if (error)
1523		return error;
1524	error = do_glock(qd, FORCE, &q_gh);
1525	if (error)
1526		goto out;
1527
1528	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1529	fdq->d_version = FS_DQUOT_VERSION;
1530	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1531	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
1532	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1533	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1534	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1535
1536	gfs2_glock_dq_uninit(&q_gh);
1537out:
1538	qd_put(qd);
1539	return error;
1540}
1541
1542/* GFS2 only supports a subset of the XFS fields */
1543#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1544
1545static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1546			  struct fs_disk_quota *fdq)
1547{
1548	struct gfs2_sbd *sdp = sb->s_fs_info;
1549	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1550	struct gfs2_quota_data *qd;
1551	struct gfs2_holder q_gh, i_gh;
1552	unsigned int data_blocks, ind_blocks;
1553	unsigned int blocks = 0;
1554	int alloc_required;
1555	loff_t offset;
1556	int error;
1557
1558	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1559		return -ESRCH; /* Crazy XFS error code */
1560
1561	if ((qid.type != USRQUOTA) &&
1562	    (qid.type != GRPQUOTA))
1563		return -EINVAL;
1564
1565	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1566		return -EINVAL;
1567
1568	error = qd_get(sdp, qid, &qd);
1569	if (error)
1570		return error;
1571
1572	error = gfs2_rs_alloc(ip);
1573	if (error)
1574		goto out_put;
1575
1576	mutex_lock(&ip->i_inode.i_mutex);
1577	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1578	if (error)
1579		goto out_unlockput;
1580	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1581	if (error)
1582		goto out_q;
1583
1584	/* Check for existing entry, if none then alloc new blocks */
1585	error = update_qd(sdp, qd);
1586	if (error)
1587		goto out_i;
1588
1589	/* If nothing has changed, this is a no-op */
1590	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1591	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1592		fdq->d_fieldmask ^= FS_DQ_BSOFT;
1593
1594	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1595	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1596		fdq->d_fieldmask ^= FS_DQ_BHARD;
1597
1598	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1599	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1600		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1601
1602	if (fdq->d_fieldmask == 0)
1603		goto out_i;
1604
1605	offset = qd2offset(qd);
1606	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1607	if (gfs2_is_stuffed(ip))
1608		alloc_required = 1;
1609	if (alloc_required) {
1610		struct gfs2_alloc_parms ap = { .aflags = 0, };
1611		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1612				       &data_blocks, &ind_blocks);
1613		blocks = 1 + data_blocks + ind_blocks;
1614		ap.target = blocks;
1615		error = gfs2_inplace_reserve(ip, &ap);
1616		if (error)
1617			goto out_i;
1618		blocks += gfs2_rg_blocks(ip, blocks);
1619	}
1620
 1621	/* Some quotas span block boundaries and can update two blocks,
 1622	   so add an extra block to the transaction to handle such quotas */
1623	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1624	if (error)
1625		goto out_release;
1626
1627	/* Apply changes */
1628	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1629
1630	gfs2_trans_end(sdp);
1631out_release:
1632	if (alloc_required)
1633		gfs2_inplace_release(ip);
1634out_i:
1635	gfs2_glock_dq_uninit(&i_gh);
1636out_q:
1637	gfs2_glock_dq_uninit(&q_gh);
1638out_unlockput:
1639	mutex_unlock(&ip->i_inode.i_mutex);
1640out_put:
1641	qd_put(qd);
1642	return error;
1643}
1644
1645const struct quotactl_ops gfs2_quotactl_ops = {
1646	.quota_sync     = gfs2_quota_sync,
1647	.get_xstate     = gfs2_quota_get_xstate,
1648	.get_dqblk	= gfs2_get_dqblk,
1649	.set_dqblk	= gfs2_set_dqblk,
1650};
1651
1652void __init gfs2_quota_hash_init(void)
1653{
1654	unsigned i;
1655
1656	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1657		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1658}