   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
   14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
   18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
   21 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
   26 * (with an infinite number of nodes with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
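/* Hash the (superblock, quota id) pair to pick a bucket in qd_hash_table. */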
  85static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  86				 const struct kqid qid)
  87{
  88	unsigned int h;
  89
  90	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  91	h = jhash(&qid, sizeof(struct kqid), h);
  92
  93	return h & GFS2_QD_HASH_MASK;
  94}
  95
  96static inline void spin_lock_bucket(unsigned int hash)
  97{
  98        hlist_bl_lock(&qd_hash_table[hash]);
  99}
 100
 101static inline void spin_unlock_bucket(unsigned int hash)
 102{
 103        hlist_bl_unlock(&qd_hash_table[hash]);
 104}
 
 105
 106static void gfs2_qd_dealloc(struct rcu_head *rcu)
 107{
 108	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 109	kmem_cache_free(gfs2_quotad_cachep, qd);
 110}
 111
 112static void gfs2_qd_dispose(struct list_head *list)
 113{
 114	struct gfs2_quota_data *qd;
 115	struct gfs2_sbd *sdp;
 
 116
 117	while (!list_empty(list)) {
 118		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 119		sdp = qd->qd_gl->gl_name.ln_sbd;
 120
 121		list_del(&qd->qd_lru);
 122
 123		/* Free from the filesystem-specific list */
 124		spin_lock(&qd_lock);
 125		list_del(&qd->qd_list);
 126		spin_unlock(&qd_lock);
 127
 128		spin_lock_bucket(qd->qd_hash);
 129		hlist_bl_del_rcu(&qd->qd_hlist);
 130		spin_unlock_bucket(qd->qd_hash);
 131
 132		gfs2_assert_warn(sdp, !qd->qd_change);
 133		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 134		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 135
 136		gfs2_glock_put(qd->qd_gl);
 137		atomic_dec(&sdp->sd_quota_count);
 138
 139		/* Delete it from the common reclaim list */
 140		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 141	}
 142}
 143
 144
 145static enum lru_status gfs2_qd_isolate(struct list_head *item,
 146		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 147{
 148	struct list_head *dispose = arg;
 149	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 150
 151	if (!spin_trylock(&qd->qd_lockref.lock))
 152		return LRU_SKIP;
 153
 154	if (qd->qd_lockref.count == 0) {
 155		lockref_mark_dead(&qd->qd_lockref);
 156		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 157	}
 158
 159	spin_unlock(&qd->qd_lockref.lock);
 160	return LRU_REMOVED;
 161}
 162
 163static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 164					 struct shrink_control *sc)
 165{
 166	LIST_HEAD(dispose);
 167	unsigned long freed;
 168
 169	if (!(sc->gfp_mask & __GFP_FS))
 170		return SHRINK_STOP;
 171
 172	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 173				     gfs2_qd_isolate, &dispose);
 174
 175	gfs2_qd_dispose(&dispose);
 176
 177	return freed;
 178}
 179
 180static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 181					  struct shrink_control *sc)
 182{
 183	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 184}
 185
 186struct shrinker gfs2_qd_shrinker = {
 187	.count_objects = gfs2_qd_shrink_count,
 188	.scan_objects = gfs2_qd_shrink_scan,
 189	.seeks = DEFAULT_SEEKS,
 190	.flags = SHRINKER_NUMA_AWARE,
 191};
 192
 193
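/*
 * User and group quota entries are interleaved in the quota file: the entry
 * for id N sits at index 2N for a user quota and 2N + 1 for a group quota.
 * qd2offset() turns that index into a byte offset within the quota file.
 */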
 194static u64 qd2index(struct gfs2_quota_data *qd)
 195{
 196	struct kqid qid = qd->qd_id;
 197	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 198		((qid.type == USRQUOTA) ? 0 : 1);
 199}
 200
 201static u64 qd2offset(struct gfs2_quota_data *qd)
 202{
 203	u64 offset;
 204
 205	offset = qd2index(qd);
 206	offset *= sizeof(struct gfs2_quota);
 207
 208	return offset;
 209}
 210
 211static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 212{
 213	struct gfs2_quota_data *qd;
 214	int error;
 215
 216	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 217	if (!qd)
 218		return NULL;
 219
 220	qd->qd_sbd = sdp;
 221	qd->qd_lockref.count = 1;
 222	spin_lock_init(&qd->qd_lockref.lock);
 223	qd->qd_id = qid;
 224	qd->qd_slot = -1;
 225	INIT_LIST_HEAD(&qd->qd_lru);
 226	qd->qd_hash = hash;
 227
 228	error = gfs2_glock_get(sdp, qd2index(qd),
 229			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 230	if (error)
 231		goto fail;
 232
 233	return qd;
 234
 235fail:
 236	kmem_cache_free(gfs2_quotad_cachep, qd);
 237	return NULL;
 238}
 239
 240static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 241						     const struct gfs2_sbd *sdp,
 242						     struct kqid qid)
 243{
 244	struct gfs2_quota_data *qd;
 245	struct hlist_bl_node *h;
 246
 247	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 248		if (!qid_eq(qd->qd_id, qid))
 249			continue;
 250		if (qd->qd_sbd != sdp)
 251			continue;
 252		if (lockref_get_not_dead(&qd->qd_lockref)) {
 253			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 254			return qd;
 255		}
 256	}
 257
 258	return NULL;
 259}
 260
 
 261
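/*
 * Look up the quota data for @qid, taking a reference on it.  If none exists
 * yet, allocate one and insert it, re-checking under qd_lock and the bucket
 * lock so a racing insert of the same id is not duplicated.
 */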
 262static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 263		  struct gfs2_quota_data **qdp)
 264{
 265	struct gfs2_quota_data *qd, *new_qd;
 266	unsigned int hash = gfs2_qd_hash(sdp, qid);
 267
 268	rcu_read_lock();
 269	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 270	rcu_read_unlock();
 271
 272	if (qd)
 273		return 0;
 274
 275	new_qd = qd_alloc(hash, sdp, qid);
 276	if (!new_qd)
 277		return -ENOMEM;
 278
 279	spin_lock(&qd_lock);
 280	spin_lock_bucket(hash);
 281	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 282	if (qd == NULL) {
 283		*qdp = new_qd;
 284		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 285		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 286		atomic_inc(&sdp->sd_quota_count);
 287	}
 288	spin_unlock_bucket(hash);
 289	spin_unlock(&qd_lock);
 290
 291	if (qd) {
 292		gfs2_glock_put(new_qd->qd_gl);
 293		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 294	}
 295
 296	return 0;
 297}
 298
 299
 300static void qd_hold(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 303	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 304	lockref_get(&qd->qd_lockref);
 305}
 306
 307static void qd_put(struct gfs2_quota_data *qd)
 308{
 309	if (lockref_put_or_lock(&qd->qd_lockref))
 310		return;
 311
 312	qd->qd_lockref.count = 0;
 313	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 314	spin_unlock(&qd->qd_lockref.lock);
 315
 316}
 317
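/*
 * Reserve a slot for this id in the per-node quota_change file by claiming
 * the first free bit in sd_quota_bitmap.  Repeated calls only bump
 * qd_slot_count; -ENOSPC is returned when no slot is free.
 */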
 318static int slot_get(struct gfs2_quota_data *qd)
 319{
 320	struct gfs2_sbd *sdp = qd->qd_sbd;
 321	unsigned int bit;
 322	int error = 0;
 323
 324	spin_lock(&sdp->sd_bitmap_lock);
 325	if (qd->qd_slot_count != 0)
 326		goto out;
 327
 328	error = -ENOSPC;
 329	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 330	if (bit < sdp->sd_quota_slots) {
 331		set_bit(bit, sdp->sd_quota_bitmap);
 332		qd->qd_slot = bit;
 333		error = 0;
 334out:
 335		qd->qd_slot_count++;
 336	}
 337	spin_unlock(&sdp->sd_bitmap_lock);
 338
 339	return error;
 340}
 341
 342static void slot_hold(struct gfs2_quota_data *qd)
 343{
 344	struct gfs2_sbd *sdp = qd->qd_sbd;
 345
 346	spin_lock(&sdp->sd_bitmap_lock);
 347	gfs2_assert(sdp, qd->qd_slot_count);
 348	qd->qd_slot_count++;
 349	spin_unlock(&sdp->sd_bitmap_lock);
 350}
 351
 352static void slot_put(struct gfs2_quota_data *qd)
 353{
 354	struct gfs2_sbd *sdp = qd->qd_sbd;
 355
 356	spin_lock(&sdp->sd_bitmap_lock);
 357	gfs2_assert(sdp, qd->qd_slot_count);
 358	if (!--qd->qd_slot_count) {
 359		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 360		qd->qd_slot = -1;
 361	}
 362	spin_unlock(&sdp->sd_bitmap_lock);
 363}
 364
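/*
 * Read the quota_change file block that holds this id's slot and point
 * qd_bh_qc at its gfs2_quota_change record.  The buffer is reference
 * counted via qd_bh_count and released again by bh_put().
 */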
 365static int bh_get(struct gfs2_quota_data *qd)
 366{
 367	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 368	struct inode *inode = sdp->sd_qc_inode;
 369	struct gfs2_inode *ip = GFS2_I(inode);
 370	unsigned int block, offset;
 371	struct buffer_head *bh;
 372	struct iomap iomap = { };
 373	int error;
 
 374
 375	mutex_lock(&sdp->sd_quota_mutex);
 376
 377	if (qd->qd_bh_count++) {
 378		mutex_unlock(&sdp->sd_quota_mutex);
 379		return 0;
 380	}
 381
 382	block = qd->qd_slot / sdp->sd_qc_per_block;
 383	offset = qd->qd_slot % sdp->sd_qc_per_block;
 384
 385	error = gfs2_iomap_get(inode,
 386			       (loff_t)block << inode->i_blkbits,
 387			       i_blocksize(inode), &iomap);
 388	if (error)
 389		goto fail;
 390	error = -ENOENT;
 391	if (iomap.type != IOMAP_MAPPED)
 392		goto fail;
 393
 394	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 395			       DIO_WAIT, 0, &bh);
 396	if (error)
 397		goto fail;
 398	error = -EIO;
 399	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 400		goto fail_brelse;
 401
 402	qd->qd_bh = bh;
 403	qd->qd_bh_qc = (struct gfs2_quota_change *)
 404		(bh->b_data + sizeof(struct gfs2_meta_header) +
 405		 offset * sizeof(struct gfs2_quota_change));
 406
 407	mutex_unlock(&sdp->sd_quota_mutex);
 408
 409	return 0;
 410
 411fail_brelse:
 412	brelse(bh);
 413fail:
 414	qd->qd_bh_count--;
 415	mutex_unlock(&sdp->sd_quota_mutex);
 416	return error;
 417}
 418
 419static void bh_put(struct gfs2_quota_data *qd)
 420{
 421	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 422
 423	mutex_lock(&sdp->sd_quota_mutex);
 424	gfs2_assert(sdp, qd->qd_bh_count);
 425	if (!--qd->qd_bh_count) {
 426		brelse(qd->qd_bh);
 427		qd->qd_bh = NULL;
 428		qd->qd_bh_qc = NULL;
 429	}
 430	mutex_unlock(&sdp->sd_quota_mutex);
 431}
 432
 433static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 434			 u64 *sync_gen)
 435{
 436	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 437	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 438	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 439		return 0;
 440
 441	if (!lockref_get_not_dead(&qd->qd_lockref))
 442		return 0;
 443
 444	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 445	set_bit(QDF_LOCKED, &qd->qd_flags);
 446	qd->qd_change_sync = qd->qd_change;
 447	slot_hold(qd);
 448	return 1;
 449}
 450
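/*
 * Find the next quota data with a pending change that has not yet been
 * synced in the current sync generation, and prepare it for do_sync() by
 * setting QDF_LOCKED and taking slot and buffer references.
 */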
 451static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 452{
 453	struct gfs2_quota_data *qd = NULL, *iter;
 454	int error;
 
 455
 456	*qdp = NULL;
 457
 458	if (sb_rdonly(sdp->sd_vfs))
 459		return 0;
 460
 461	spin_lock(&qd_lock);
 462
 463	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
 464		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
 465			qd = iter;
 466			break;
 467		}
 468	}
 469
 470	spin_unlock(&qd_lock);
 471
 472	if (qd) {
 473		gfs2_assert_warn(sdp, qd->qd_change_sync);
 474		error = bh_get(qd);
 475		if (error) {
 476			clear_bit(QDF_LOCKED, &qd->qd_flags);
 477			slot_put(qd);
 478			qd_put(qd);
 479			return error;
 480		}
 481	}
 482
 483	*qdp = qd;
 484
 485	return 0;
 486}
 487
 488static void qd_unlock(struct gfs2_quota_data *qd)
 489{
 490	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
 491			 test_bit(QDF_LOCKED, &qd->qd_flags));
 492	clear_bit(QDF_LOCKED, &qd->qd_flags);
 493	bh_put(qd);
 494	slot_put(qd);
 495	qd_put(qd);
 496}
 497
 498static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 499		    struct gfs2_quota_data **qdp)
 500{
 501	int error;
 502
 503	error = qd_get(sdp, qid, qdp);
 504	if (error)
 505		return error;
 506
 507	error = slot_get(*qdp);
 508	if (error)
 509		goto fail;
 510
 511	error = bh_get(*qdp);
 512	if (error)
 513		goto fail_slot;
 514
 515	return 0;
 516
 517fail_slot:
 518	slot_put(*qdp);
 519fail:
 520	qd_put(*qdp);
 521	return error;
 522}
 523
 524static void qdsb_put(struct gfs2_quota_data *qd)
 525{
 526	bh_put(qd);
 527	slot_put(qd);
 528	qd_put(qd);
 529}
 530
 531/**
  532 * gfs2_qa_get - make sure we have a quota allocation data structure,
 533 *               if necessary
 534 * @ip: the inode for this reservation
 535 */
 536int gfs2_qa_get(struct gfs2_inode *ip)
 537{
 538	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 539	struct inode *inode = &ip->i_inode;
 540
 541	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 542		return 0;
 543
 544	spin_lock(&inode->i_lock);
 545	if (ip->i_qadata == NULL) {
 546		struct gfs2_qadata *tmp;
 547
 548		spin_unlock(&inode->i_lock);
 549		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 550		if (!tmp)
 551			return -ENOMEM;
 552
 553		spin_lock(&inode->i_lock);
 554		if (ip->i_qadata == NULL)
 555			ip->i_qadata = tmp;
 556		else
 557			kmem_cache_free(gfs2_qadata_cachep, tmp);
 558	}
 559	ip->i_qadata->qa_ref++;
 560	spin_unlock(&inode->i_lock);
 561	return 0;
 562}
 563
 564void gfs2_qa_put(struct gfs2_inode *ip)
 565{
 566	struct inode *inode = &ip->i_inode;
 567
 568	spin_lock(&inode->i_lock);
 569	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 570		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 571		ip->i_qadata = NULL;
 572	}
 573	spin_unlock(&inode->i_lock);
 574}
 575
 576int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 577{
 578	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 579	struct gfs2_quota_data **qd;
 
 580	int error;
 581
 582	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 583		return 0;
 584
 585	error = gfs2_qa_get(ip);
 586	if (error)
 587		return error;
 588
 589	qd = ip->i_qadata->qa_qd;
 590
 591	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 592	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 593		error = -EIO;
 594		goto out;
 595	}
 596
 597	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 598	if (error)
 599		goto out_unhold;
 600	ip->i_qadata->qa_qd_num++;
 601	qd++;
 602
 603	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 604	if (error)
 605		goto out_unhold;
 606	ip->i_qadata->qa_qd_num++;
 607	qd++;
 608
 609	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 610	    !uid_eq(uid, ip->i_inode.i_uid)) {
 611		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 612		if (error)
 613			goto out_unhold;
 614		ip->i_qadata->qa_qd_num++;
 615		qd++;
 616	}
 617
 618	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 619	    !gid_eq(gid, ip->i_inode.i_gid)) {
 620		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 621		if (error)
 622			goto out_unhold;
 623		ip->i_qadata->qa_qd_num++;
 624		qd++;
 625	}
 626
 627out_unhold:
 628	if (error)
 629		gfs2_quota_unhold(ip);
 630out:
 631	return error;
 632}
 633
 634void gfs2_quota_unhold(struct gfs2_inode *ip)
 635{
 636	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 637	u32 x;
 638
 639	if (ip->i_qadata == NULL)
 640		return;
 641
 642	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 643
 644	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 645		qdsb_put(ip->i_qadata->qa_qd[x]);
 646		ip->i_qadata->qa_qd[x] = NULL;
 647	}
 648	ip->i_qadata->qa_qd_num = 0;
 649	gfs2_qa_put(ip);
 650}
 651
 652static int sort_qd(const void *a, const void *b)
 653{
 654	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 655	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 656
 657	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 658		return -1;
 659	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 660		return 1;
 
 661	return 0;
 662}
 663
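/*
 * Apply a local quota change: add @change to this id's record in the
 * per-node quota_change file and to qd_change.  Slot and qd references are
 * taken when the record becomes non-zero and dropped when it returns to zero.
 */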
 664static void do_qc(struct gfs2_quota_data *qd, s64 change)
 665{
 666	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 667	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 668	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 669	s64 x;
 670
 671	mutex_lock(&sdp->sd_quota_mutex);
 672	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 673
 674	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 675		qc->qc_change = 0;
 676		qc->qc_flags = 0;
 677		if (qd->qd_id.type == USRQUOTA)
 678			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 679		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 680	}
 681
 682	x = be64_to_cpu(qc->qc_change) + change;
 683	qc->qc_change = cpu_to_be64(x);
 684
 685	spin_lock(&qd_lock);
 686	qd->qd_change = x;
 687	spin_unlock(&qd_lock);
 688
 689	if (!x) {
 690		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 691		clear_bit(QDF_CHANGE, &qd->qd_flags);
 692		qc->qc_flags = 0;
 693		qc->qc_id = 0;
 694		slot_put(qd);
 695		qd_put(qd);
 696	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 697		qd_hold(qd);
 698		slot_hold(qd);
 699	}
 700
 701	if (change < 0) /* Reset quiet flag if we freed some blocks */
 702		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 703	mutex_unlock(&sdp->sd_quota_mutex);
 704}
 705
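/*
 * Write @bytes of @buf into the quota file at page @index, offset @off,
 * mapping (and zeroing, if newly allocated) the underlying buffers and
 * adding them to the current transaction or the ordered write list.
 */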
 706static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 707				  unsigned off, void *buf, unsigned bytes)
 708{
 709	struct inode *inode = &ip->i_inode;
 710	struct gfs2_sbd *sdp = GFS2_SB(inode);
 711	struct address_space *mapping = inode->i_mapping;
 712	struct page *page;
 713	struct buffer_head *bh;
 714	void *kaddr;
 715	u64 blk;
 716	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 717	unsigned to_write = bytes, pg_off = off;
 718	int done = 0;
 719
 720	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 721	boff = off % bsize;
 722
 723	page = find_or_create_page(mapping, index, GFP_NOFS);
 724	if (!page)
 725		return -ENOMEM;
 726	if (!page_has_buffers(page))
 727		create_empty_buffers(page, bsize, 0);
 728
 729	bh = page_buffers(page);
 730	while (!done) {
 731		/* Find the beginning block within the page */
 732		if (pg_off >= ((bnum * bsize) + bsize)) {
 733			bh = bh->b_this_page;
 734			bnum++;
 735			blk++;
 736			continue;
 737		}
 738		if (!buffer_mapped(bh)) {
 739			gfs2_block_map(inode, blk, bh, 1);
 740			if (!buffer_mapped(bh))
 741				goto unlock_out;
 742			/* If it's a newly allocated disk block, zero it */
 743			if (buffer_new(bh))
 744				zero_user(page, bnum * bsize, bh->b_size);
 745		}
 746		if (PageUptodate(page))
 747			set_buffer_uptodate(bh);
 748		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 749			goto unlock_out;
 750		if (gfs2_is_jdata(ip))
 751			gfs2_trans_add_data(ip->i_gl, bh);
 752		else
 753			gfs2_ordered_add_inode(ip);
 754
 755		/* If we need to write to the next block as well */
 756		if (to_write > (bsize - boff)) {
 757			pg_off += (bsize - boff);
 758			to_write -= (bsize - boff);
 759			boff = pg_off % bsize;
 760			continue;
 761		}
 762		done = 1;
 763	}
 764
 765	/* Write to the page, now that we have setup the buffer(s) */
 766	kaddr = kmap_atomic(page);
 767	memcpy(kaddr + off, buf, bytes);
 768	flush_dcache_page(page);
 769	kunmap_atomic(kaddr);
 770	unlock_page(page);
 771	put_page(page);
 772
 773	return 0;
 774
 775unlock_out:
 776	unlock_page(page);
 777	put_page(page);
 778	return -EIO;
 779}
 780
 781static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 782				 loff_t loc)
 783{
 784	unsigned long pg_beg;
 785	unsigned pg_off, nbytes, overflow = 0;
 786	int pg_oflow = 0, error;
 787	void *ptr;
 788
 789	nbytes = sizeof(struct gfs2_quota);
 790
 791	pg_beg = loc >> PAGE_SHIFT;
 792	pg_off = offset_in_page(loc);
 793
 794	/* If the quota straddles a page boundary, split the write in two */
 795	if ((pg_off + nbytes) > PAGE_SIZE) {
 796		pg_oflow = 1;
 797		overflow = (pg_off + nbytes) - PAGE_SIZE;
 798	}
 799
 800	ptr = qp;
 801	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
 802				       nbytes - overflow);
 803	/* If there's an overflow, write the remaining bytes to the next page */
 804	if (!error && pg_oflow)
 805		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
 806					       ptr + nbytes - overflow,
 807					       overflow);
 808	return error;
 809}
 810
 811/**
 812 * gfs2_adjust_quota - adjust record of current block usage
 813 * @ip: The quota inode
 814 * @loc: Offset of the entry in the quota file
 815 * @change: The amount of usage change to record
 816 * @qd: The quota data
 817 * @fdq: The updated limits to record
 818 *
 819 * This function was mostly borrowed from gfs2_block_truncate_page which was
 820 * in turn mostly borrowed from ext3
 821 *
 822 * Returns: 0 or -ve on error
 823 */
 824
 825static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 826			     s64 change, struct gfs2_quota_data *qd,
 827			     struct qc_dqblk *fdq)
 828{
 829	struct inode *inode = &ip->i_inode;
 830	struct gfs2_sbd *sdp = GFS2_SB(inode);
 831	struct gfs2_quota q;
 832	int err;
 833	u64 size;
 834
 835	if (gfs2_is_stuffed(ip)) {
 836		err = gfs2_unstuff_dinode(ip);
 837		if (err)
 838			return err;
 839	}
 840
 841	memset(&q, 0, sizeof(struct gfs2_quota));
 842	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 843	if (err < 0)
 844		return err;
 845
 846	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 847	err = -EIO;
 848	be64_add_cpu(&q.qu_value, change);
 849	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 850		q.qu_value = 0; /* Never go negative on quota usage */
 851	qd->qd_qb.qb_value = q.qu_value;
 
 852	if (fdq) {
 853		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 854			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 855			qd->qd_qb.qb_warn = q.qu_warn;
 856		}
 857		if (fdq->d_fieldmask & QC_SPC_HARD) {
 858			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 859			qd->qd_qb.qb_limit = q.qu_limit;
 860		}
 861		if (fdq->d_fieldmask & QC_SPACE) {
 862			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 863			qd->qd_qb.qb_value = q.qu_value;
 864		}
 865	}
 866
 867	err = gfs2_write_disk_quota(ip, &q, loc);
 868	if (!err) {
 869		size = loc + sizeof(struct gfs2_quota);
 870		if (size > inode->i_size)
 871			i_size_write(inode, size);
 872		inode->i_mtime = inode->i_atime = current_time(inode);
 873		mark_inode_dirty(inode);
 874		set_bit(QDF_REFRESH, &qd->qd_flags);
 875	}
 876
 877	return err;
 878}
 879
 880static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 881{
 882	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 883	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 884	struct gfs2_alloc_parms ap = { .aflags = 0, };
 885	unsigned int data_blocks, ind_blocks;
 886	struct gfs2_holder *ghs, i_gh;
 887	unsigned int qx, x;
 888	struct gfs2_quota_data *qd;
 889	unsigned reserved;
 890	loff_t offset;
 891	unsigned int nalloc = 0, blocks;
 892	int error;
 893
 894	error = gfs2_qa_get(ip);
 895	if (error)
 896		return error;
 897
 898	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 899			      &data_blocks, &ind_blocks);
 900
 901	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 902	if (!ghs) {
 903		error = -ENOMEM;
 904		goto out;
 905	}
 906
 907	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 908	inode_lock(&ip->i_inode);
 909	for (qx = 0; qx < num_qd; qx++) {
 910		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 911					   GL_NOCACHE, &ghs[qx]);
 912		if (error)
 913			goto out_dq;
 914	}
 915
 916	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 917	if (error)
 918		goto out_dq;
 919
 920	for (x = 0; x < num_qd; x++) {
 921		offset = qd2offset(qda[x]);
 922		if (gfs2_write_alloc_required(ip, offset,
 923					      sizeof(struct gfs2_quota)))
 924			nalloc++;
 925	}
 926
  927	/*
  928	 * 1 blk for unstuffing the inode if it is stuffed. We add this extra
  929	 * block to the reservation unconditionally. If the inode
  930	 * doesn't need unstuffing, the block will be released to the
  931	 * rgrp since it won't be allocated during the transaction
  932	 */
  933	/* +3 at the end for the unstuffing block, the inode size update block,
  934	 * and another block in case a quota straddles a page boundary and
  935	 * two blocks need to be updated instead of one */
 936	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 937
 938	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 939	ap.target = reserved;
 940	error = gfs2_inplace_reserve(ip, &ap);
 941	if (error)
 942		goto out_alloc;
 943
 944	if (nalloc)
 945		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 946
 947	error = gfs2_trans_begin(sdp, blocks, 0);
 948	if (error)
 949		goto out_ipres;
 950
 951	for (x = 0; x < num_qd; x++) {
 952		qd = qda[x];
 953		offset = qd2offset(qd);
 954		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 955		if (error)
 956			goto out_end_trans;
 957
 958		do_qc(qd, -qd->qd_change_sync);
 959		set_bit(QDF_REFRESH, &qd->qd_flags);
 960	}
 961
 962	error = 0;
 963
 964out_end_trans:
 965	gfs2_trans_end(sdp);
 966out_ipres:
 967	gfs2_inplace_release(ip);
 968out_alloc:
 969	gfs2_glock_dq_uninit(&i_gh);
 970out_dq:
 971	while (qx--)
 972		gfs2_glock_dq_uninit(&ghs[qx]);
 973	inode_unlock(&ip->i_inode);
 974	kfree(ghs);
 975	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
 976		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
 977out:
 978	gfs2_qa_put(ip);
 979	return error;
 980}
 981
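/*
 * Read this id's record from the quota file and publish the limits and
 * current value through the glock's Lock Value Block, keeping a copy in
 * qd->qd_qb.
 */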
 982static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 983{
 984	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 985	struct gfs2_quota q;
 986	struct gfs2_quota_lvb *qlvb;
 987	loff_t pos;
 988	int error;
 989
 990	memset(&q, 0, sizeof(struct gfs2_quota));
 991	pos = qd2offset(qd);
 992	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 993	if (error < 0)
 994		return error;
 995
 996	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 997	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 998	qlvb->__pad = 0;
 999	qlvb->qb_limit = q.qu_limit;
1000	qlvb->qb_warn = q.qu_warn;
1001	qlvb->qb_value = q.qu_value;
1002	qd->qd_qb = *qlvb;
1003
1004	return 0;
1005}
1006
1007static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1008		    struct gfs2_holder *q_gh)
1009{
1010	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1011	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1012	struct gfs2_holder i_gh;
1013	int error;
1014
1015restart:
1016	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1017	if (error)
1018		return error;
1019
1020	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1021		force_refresh = FORCE;
1022
1023	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1024
1025	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1026		gfs2_glock_dq_uninit(q_gh);
1027		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1028					   GL_NOCACHE, q_gh);
1029		if (error)
1030			return error;
1031
1032		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1033		if (error)
1034			goto fail;
1035
1036		error = update_qd(sdp, qd);
1037		if (error)
1038			goto fail_gunlock;
1039
1040		gfs2_glock_dq_uninit(&i_gh);
1041		gfs2_glock_dq_uninit(q_gh);
1042		force_refresh = 0;
1043		goto restart;
1044	}
1045
1046	return 0;
1047
1048fail_gunlock:
1049	gfs2_glock_dq_uninit(&i_gh);
1050fail:
1051	gfs2_glock_dq_uninit(q_gh);
1052	return error;
1053}
1054
1055int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1056{
1057	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
1058	struct gfs2_quota_data *qd;
1059	u32 x;
1060	int error = 0;
1061
1062	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1063		return 0;
1064
1065	error = gfs2_quota_hold(ip, uid, gid);
1066	if (error)
1067		return error;
1068
1069	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1070	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1071
1072	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1073		qd = ip->i_qadata->qa_qd[x];
1074		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1075		if (error)
1076			break;
1077	}
1078
1079	if (!error)
1080		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1081	else {
1082		while (x--)
1083			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1084		gfs2_quota_unhold(ip);
1085	}
1086
1087	return error;
1088}
1089
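/*
 * Decide whether this quota data's local change should be synced now: the
 * unsynced change is scaled by the number of journals (nodes) and by
 * quota_scale, and a sync is requested once that, added to the last known
 * global value, would reach the hard limit.
 */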
1090static int need_sync(struct gfs2_quota_data *qd)
1091{
1092	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1093	struct gfs2_tune *gt = &sdp->sd_tune;
1094	s64 value;
1095	unsigned int num, den;
1096	int do_sync = 1;
1097
1098	if (!qd->qd_qb.qb_limit)
1099		return 0;
1100
1101	spin_lock(&qd_lock);
1102	value = qd->qd_change;
1103	spin_unlock(&qd_lock);
1104
1105	spin_lock(&gt->gt_spin);
1106	num = gt->gt_quota_scale_num;
1107	den = gt->gt_quota_scale_den;
1108	spin_unlock(&gt->gt_spin);
1109
1110	if (value < 0)
1111		do_sync = 0;
1112	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1113		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1114		do_sync = 0;
1115	else {
1116		value *= gfs2_jindex_size(sdp) * num;
1117		value = div_s64(value, den);
1118		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1119		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1120			do_sync = 0;
1121	}
1122
1123	return do_sync;
1124}
1125
1126void gfs2_quota_unlock(struct gfs2_inode *ip)
1127{
1128	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1129	struct gfs2_quota_data *qda[4];
1130	unsigned int count = 0;
1131	u32 x;
1132	int found;
1133
1134	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1135		return;
1136
1137	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1138		struct gfs2_quota_data *qd;
1139		int sync;
1140
1141		qd = ip->i_qadata->qa_qd[x];
1142		sync = need_sync(qd);
1143
1144		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1145		if (!sync)
1146			continue;
1147
1148		spin_lock(&qd_lock);
1149		found = qd_check_sync(sdp, qd, NULL);
1150		spin_unlock(&qd_lock);
1151
1152		if (!found)
1153			continue;
1154
1155		gfs2_assert_warn(sdp, qd->qd_change_sync);
1156		if (bh_get(qd)) {
1157			clear_bit(QDF_LOCKED, &qd->qd_flags);
1158			slot_put(qd);
1159			qd_put(qd);
1160			continue;
1161		}
1162
1163		qda[count++] = qd;
 
1164	}
1165
1166	if (count) {
1167		do_sync(count, qda);
1168		for (x = 0; x < count; x++)
1169			qd_unlock(qda[x]);
1170	}
1171
 
1172	gfs2_quota_unhold(ip);
1173}
1174
1175#define MAX_LINE 256
1176
1177static int print_message(struct gfs2_quota_data *qd, char *type)
1178{
1179	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1180
1181	fs_info(sdp, "quota %s for %s %u\n",
1182		type,
1183		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1184		from_kqid(&init_user_ns, qd->qd_id));
1185
1186	return 0;
1187}
1188
1189/**
1190 * gfs2_quota_check - check if allocating new blocks will exceed quota
1191 * @ip:  The inode for which this check is being performed
1192 * @uid: The uid to check against
1193 * @gid: The gid to check against
1194 * @ap:  The allocation parameters. ap->target contains the requested
1195 *       blocks. ap->min_target, if set, contains the minimum blks
1196 *       requested.
1197 *
1198 * Returns: 0 on success.
1199 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1200 *                  quota must allow at least min_req blks for success and
1201 *                  ap->allowed is set to the number of blocks allowed
1202 *
1203 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1204 *                  of blocks available.
1205 */
1206int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1207		     struct gfs2_alloc_parms *ap)
1208{
1209	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
1210	struct gfs2_quota_data *qd;
1211	s64 value, warn, limit;
1212	u32 x;
1213	int error = 0;
1214
1215	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1216	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1217		return 0;
1218
1219	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1220		qd = ip->i_qadata->qa_qd[x];
1221
1222		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1223		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1224			continue;
1225
1226		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1227		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1228		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1229		spin_lock(&qd_lock);
1230		value += qd->qd_change;
1231		spin_unlock(&qd_lock);
1232
1233		if (limit > 0 && (limit - value) < ap->allowed)
1234			ap->allowed = limit - value;
1235		/* If we can't meet the target */
1236		if (limit && limit < (value + (s64)ap->target)) {
1237			/* If no min_target specified or we don't meet
1238			 * min_target, return -EDQUOT */
1239			if (!ap->min_target || ap->min_target > ap->allowed) {
1240				if (!test_and_set_bit(QDF_QMSG_QUIET,
1241						      &qd->qd_flags)) {
1242					print_message(qd, "exceeded");
1243					quota_send_warning(qd->qd_id,
1244							   sdp->sd_vfs->s_dev,
1245							   QUOTA_NL_BHARDWARN);
1246				}
1247				error = -EDQUOT;
1248				break;
1249			}
1250		} else if (warn && warn < value &&
1251			   time_after_eq(jiffies, qd->qd_last_warn +
1252					 gfs2_tune_get(sdp, gt_quota_warn_period)
1253					 * HZ)) {
1254			quota_send_warning(qd->qd_id,
1255					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1256			error = print_message(qd, "warning");
1257			qd->qd_last_warn = jiffies;
1258		}
1259	}
 
1260	return error;
1261}
1262
1263void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1264		       kuid_t uid, kgid_t gid)
1265{
 
1266	struct gfs2_quota_data *qd;
1267	u32 x;
1268	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1269
1270	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
1271	    gfs2_assert_warn(sdp, change))
1272		return;
1273	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1274		return;
1275
1276	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1277				 ip->i_qadata->qa_ref > 0))
1278		return;
1279	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1280		qd = ip->i_qadata->qa_qd[x];
1281
1282		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1283		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1284			do_qc(qd, change);
1285		}
1286	}
1287}
1288
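/*
 * Sync all pending local quota changes to the global quota file, processing
 * them in batches of at most max_qd at a time.
 */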
1289int gfs2_quota_sync(struct super_block *sb, int type)
1290{
1291	struct gfs2_sbd *sdp = sb->s_fs_info;
1292	struct gfs2_quota_data **qda;
1293	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1294	unsigned int num_qd;
1295	unsigned int x;
1296	int error = 0;
1297
1298	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1299	if (!qda)
1300		return -ENOMEM;
1301
1302	mutex_lock(&sdp->sd_quota_sync_mutex);
1303	sdp->sd_quota_sync_gen++;
1304
1305	do {
1306		num_qd = 0;
1307
1308		for (;;) {
1309			error = qd_fish(sdp, qda + num_qd);
1310			if (error || !qda[num_qd])
1311				break;
1312			if (++num_qd == max_qd)
1313				break;
1314		}
1315
1316		if (num_qd) {
1317			if (!error)
1318				error = do_sync(num_qd, qda);
1319			if (!error)
1320				for (x = 0; x < num_qd; x++)
1321					qda[x]->qd_sync_gen =
1322						sdp->sd_quota_sync_gen;
1323
1324			for (x = 0; x < num_qd; x++)
1325				qd_unlock(qda[x]);
1326		}
1327	} while (!error && num_qd == max_qd);
1328
1329	mutex_unlock(&sdp->sd_quota_sync_mutex);
1330	kfree(qda);
1331
1332	return error;
1333}
1334
1335int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1336{
1337	struct gfs2_quota_data *qd;
1338	struct gfs2_holder q_gh;
1339	int error;
1340
1341	error = qd_get(sdp, qid, &qd);
1342	if (error)
1343		return error;
1344
1345	error = do_glock(qd, FORCE, &q_gh);
1346	if (!error)
1347		gfs2_glock_dq_uninit(&q_gh);
1348
1349	qd_put(qd);
1350	return error;
1351}
1352
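/*
 * Called at mount time: scan the per-node quota_change file and rebuild the
 * in-core gfs2_quota_data entries (and bitmap slots) for every id that still
 * has a non-zero pending change.
 */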
1353int gfs2_quota_init(struct gfs2_sbd *sdp)
1354{
1355	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1356	u64 size = i_size_read(sdp->sd_qc_inode);
1357	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1358	unsigned int x, slot = 0;
1359	unsigned int found = 0;
1360	unsigned int hash;
1361	unsigned int bm_size;
1362	u64 dblock;
1363	u32 extlen = 0;
1364	int error;
1365
1366	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1367		return -EIO;
1368
1369	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1370	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1371	bm_size *= sizeof(unsigned long);
1372	error = -ENOMEM;
1373	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1374	if (sdp->sd_quota_bitmap == NULL)
1375		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1376						 __GFP_ZERO);
1377	if (!sdp->sd_quota_bitmap)
1378		return error;
1379
1380	for (x = 0; x < blocks; x++) {
1381		struct buffer_head *bh;
1382		const struct gfs2_quota_change *qc;
1383		unsigned int y;
1384
1385		if (!extlen) {
1386			extlen = 32;
1387			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1388			if (error)
1389				goto fail;
1390		}
1391		error = -EIO;
1392		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1393		if (!bh)
1394			goto fail;
1395		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1396			brelse(bh);
1397			goto fail;
1398		}
1399
1400		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1401		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1402		     y++, slot++) {
 
1403			struct gfs2_quota_data *qd;
1404			s64 qc_change = be64_to_cpu(qc->qc_change);
1405			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1406			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1407						USRQUOTA : GRPQUOTA;
1408			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1409						      be32_to_cpu(qc->qc_id));
1410			qc++;
1411			if (!qc_change)
1412				continue;
1413
1414			hash = gfs2_qd_hash(sdp, qc_id);
1415			qd = qd_alloc(hash, sdp, qc_id);
1416			if (qd == NULL) {
1417				brelse(bh);
1418				goto fail;
1419			}
1420
1421			set_bit(QDF_CHANGE, &qd->qd_flags);
1422			qd->qd_change = qc_change;
1423			qd->qd_slot = slot;
1424			qd->qd_slot_count = 1;
1425
1426			spin_lock(&qd_lock);
1427			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1428			list_add(&qd->qd_list, &sdp->sd_quota_list);
1429			atomic_inc(&sdp->sd_quota_count);
1430			spin_unlock(&qd_lock);
1431
1432			spin_lock_bucket(hash);
1433			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1434			spin_unlock_bucket(hash);
1435
1436			found++;
1437		}
1438
1439		brelse(bh);
1440		dblock++;
1441		extlen--;
1442	}
1443
1444	if (found)
1445		fs_info(sdp, "found %u quota changes\n", found);
1446
1447	return 0;
1448
1449fail:
1450	gfs2_quota_cleanup(sdp);
1451	return error;
1452}
1453
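/*
 * Release all in-core quota data for this filesystem (at unmount or after a
 * failed mount) and free the quota slot bitmap.
 */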
1454void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1455{
1456	struct list_head *head = &sdp->sd_quota_list;
1457	struct gfs2_quota_data *qd;
 
1458
1459	spin_lock(&qd_lock);
1460	while (!list_empty(head)) {
1461		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
1462
1463		list_del(&qd->qd_list);
1464
 
1465		/* Also remove if this qd exists in the reclaim list */
1466		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1467		atomic_dec(&sdp->sd_quota_count);
1468		spin_unlock(&qd_lock);
1469
1470		spin_lock_bucket(qd->qd_hash);
1471		hlist_bl_del_rcu(&qd->qd_hlist);
1472		spin_unlock_bucket(qd->qd_hash);
1473
1474		gfs2_assert_warn(sdp, !qd->qd_change);
1475		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1476		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1477
1478		gfs2_glock_put(qd->qd_gl);
1479		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1480
1481		spin_lock(&qd_lock);
1482	}
1483	spin_unlock(&qd_lock);
1484
1485	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1486
1487	kvfree(sdp->sd_quota_bitmap);
1488	sdp->sd_quota_bitmap = NULL;
1489}
1490
1491static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1492{
1493	if (error == 0 || error == -EROFS)
1494		return;
1495	if (!gfs2_withdrawn(sdp)) {
1496		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1497			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1498		wake_up(&sdp->sd_logd_waitq);
1499	}
1500}
1501
1502static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1503			       int (*fxn)(struct super_block *sb, int type),
1504			       unsigned long t, unsigned long *timeo,
1505			       unsigned int *new_timeo)
1506{
1507	if (t >= *timeo) {
1508		int error = fxn(sdp->sd_vfs, 0);
1509		quotad_error(sdp, msg, error);
1510		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1511	} else {
1512		*timeo -= t;
1513	}
1514}
1515
1516void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1517	if (!sdp->sd_statfs_force_sync) {
1518		sdp->sd_statfs_force_sync = 1;
1519		wake_up(&sdp->sd_quota_wait);
1520	}
1521}
1522
1523
1524/**
1525 * gfs2_quotad - Write cached quota changes into the quota file
1526 * @data: Pointer to GFS2 superblock
1527 *
1528 */
1529
1530int gfs2_quotad(void *data)
1531{
1532	struct gfs2_sbd *sdp = data;
1533	struct gfs2_tune *tune = &sdp->sd_tune;
1534	unsigned long statfs_timeo = 0;
1535	unsigned long quotad_timeo = 0;
1536	unsigned long t = 0;
1537	DEFINE_WAIT(wait);
 
1538
1539	while (!kthread_should_stop()) {
1540
1541		if (gfs2_withdrawn(sdp))
1542			goto bypass;
1543		/* Update the master statfs file */
1544		if (sdp->sd_statfs_force_sync) {
1545			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1546			quotad_error(sdp, "statfs", error);
1547			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1548		}
1549		else
1550			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1551				   	   &statfs_timeo,
1552					   &tune->gt_statfs_quantum);
1553
1554		/* Update quota file */
1555		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1556				   &quotad_timeo, &tune->gt_quota_quantum);
1557
1558		try_to_freeze();
1559
1560bypass:
1561		t = min(quotad_timeo, statfs_timeo);
1562
1563		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1564		if (!sdp->sd_statfs_force_sync)
1565			t -= schedule_timeout(t);
1566		else
1567			t = 0;
1568		finish_wait(&sdp->sd_quota_wait, &wait);
1569	}
1570
1571	return 0;
1572}
1573
1574static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1575{
1576	struct gfs2_sbd *sdp = sb->s_fs_info;
1577
1578	memset(state, 0, sizeof(*state));
 
1579
1580	switch (sdp->sd_args.ar_quota) {
1581	case GFS2_QUOTA_ON:
1582		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1583		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1584		fallthrough;
1585	case GFS2_QUOTA_ACCOUNT:
1586		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1587						  QCI_SYSFILE;
1588		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1589						  QCI_SYSFILE;
1590		break;
1591	case GFS2_QUOTA_OFF:
1592		break;
1593	}
 
1594	if (sdp->sd_quota_inode) {
1595		state->s_state[USRQUOTA].ino =
1596					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1597		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1598	}
1599	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1600	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1601	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1602	return 0;
1603}
1604
1605static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1606			  struct qc_dqblk *fdq)
1607{
1608	struct gfs2_sbd *sdp = sb->s_fs_info;
1609	struct gfs2_quota_lvb *qlvb;
1610	struct gfs2_quota_data *qd;
1611	struct gfs2_holder q_gh;
1612	int error;
1613
1614	memset(fdq, 0, sizeof(*fdq));
1615
1616	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1617		return -ESRCH; /* Crazy XFS error code */
1618
1619	if ((qid.type != USRQUOTA) &&
1620	    (qid.type != GRPQUOTA))
1621		return -EINVAL;
1622
1623	error = qd_get(sdp, qid, &qd);
1624	if (error)
1625		return error;
1626	error = do_glock(qd, FORCE, &q_gh);
1627	if (error)
1628		goto out;
1629
1630	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1631	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1632	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1633	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1634
1635	gfs2_glock_dq_uninit(&q_gh);
1636out:
1637	qd_put(qd);
1638	return error;
1639}
1640
1641/* GFS2 only supports a subset of the XFS fields */
1642#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1643
1644static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1645			  struct qc_dqblk *fdq)
1646{
1647	struct gfs2_sbd *sdp = sb->s_fs_info;
1648	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1649	struct gfs2_quota_data *qd;
1650	struct gfs2_holder q_gh, i_gh;
1651	unsigned int data_blocks, ind_blocks;
1652	unsigned int blocks = 0;
1653	int alloc_required;
1654	loff_t offset;
1655	int error;
1656
1657	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1658		return -ESRCH; /* Crazy XFS error code */
1659
1660	if ((qid.type != USRQUOTA) &&
1661	    (qid.type != GRPQUOTA))
1662		return -EINVAL;
 
1663
1664	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1665		return -EINVAL;
1666
1667	error = qd_get(sdp, qid, &qd);
1668	if (error)
1669		return error;
1670
1671	error = gfs2_qa_get(ip);
1672	if (error)
1673		goto out_put;
1674
1675	inode_lock(&ip->i_inode);
1676	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1677	if (error)
1678		goto out_unlockput;
1679	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1680	if (error)
1681		goto out_q;
1682
1683	/* Check for existing entry, if none then alloc new blocks */
1684	error = update_qd(sdp, qd);
1685	if (error)
1686		goto out_i;
1687
1688	/* If nothing has changed, this is a no-op */
1689	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1690	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1691		fdq->d_fieldmask ^= QC_SPC_SOFT;
1692
1693	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1694	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1695		fdq->d_fieldmask ^= QC_SPC_HARD;
1696
1697	if ((fdq->d_fieldmask & QC_SPACE) &&
1698	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1699		fdq->d_fieldmask ^= QC_SPACE;
1700
1701	if (fdq->d_fieldmask == 0)
1702		goto out_i;
1703
1704	offset = qd2offset(qd);
1705	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1706	if (gfs2_is_stuffed(ip))
1707		alloc_required = 1;
1708	if (alloc_required) {
1709		struct gfs2_alloc_parms ap = { .aflags = 0, };
1710		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1711				       &data_blocks, &ind_blocks);
1712		blocks = 1 + data_blocks + ind_blocks;
1713		ap.target = blocks;
1714		error = gfs2_inplace_reserve(ip, &ap);
1715		if (error)
1716			goto out_i;
1717		blocks += gfs2_rg_blocks(ip, blocks);
1718	}
1719
1720	/* Some quotas span block boundaries and can update two blocks,
1721	   adding an extra block to the transaction to handle such quotas */
1722	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1723	if (error)
1724		goto out_release;
1725
1726	/* Apply changes */
1727	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1728	if (!error)
1729		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1730
1731	gfs2_trans_end(sdp);
1732out_release:
1733	if (alloc_required)
1734		gfs2_inplace_release(ip);
1735out_i:
1736	gfs2_glock_dq_uninit(&i_gh);
1737out_q:
1738	gfs2_glock_dq_uninit(&q_gh);
1739out_unlockput:
1740	gfs2_qa_put(ip);
1741	inode_unlock(&ip->i_inode);
1742out_put:
 
1743	qd_put(qd);
1744	return error;
1745}
1746
1747const struct quotactl_ops gfs2_quotactl_ops = {
1748	.quota_sync     = gfs2_quota_sync,
1749	.get_state	= gfs2_quota_get_state,
1750	.get_dqblk	= gfs2_get_dqblk,
1751	.set_dqblk	= gfs2_set_dqblk,
1752};
1753
1754void __init gfs2_quota_hash_init(void)
1755{
1756	unsigned i;
1757
1758	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1759		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1760}
v3.5.6
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
  17 * multiple nodes to overrun their quota, but that overrun is controlable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
  21 * There are couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
  24 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
  29 * (with infinite node with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
  38
 
 
  39#include <linux/sched.h>
  40#include <linux/slab.h>
  41#include <linux/mm.h>
  42#include <linux/spinlock.h>
  43#include <linux/completion.h>
  44#include <linux/buffer_head.h>
  45#include <linux/sort.h>
  46#include <linux/fs.h>
  47#include <linux/bio.h>
  48#include <linux/gfs2_ondisk.h>
  49#include <linux/kthread.h>
  50#include <linux/freezer.h>
  51#include <linux/quota.h>
  52#include <linux/dqblk_xfs.h>
 
 
 
 
 
 
 
  53
  54#include "gfs2.h"
  55#include "incore.h"
  56#include "bmap.h"
  57#include "glock.h"
  58#include "glops.h"
  59#include "log.h"
  60#include "meta_io.h"
  61#include "quota.h"
  62#include "rgrp.h"
  63#include "super.h"
  64#include "trans.h"
  65#include "inode.h"
  66#include "util.h"
  67
  68#define QUOTA_USER 1
  69#define QUOTA_GROUP 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  70
  71struct gfs2_quota_change_host {
  72	u64 qc_change;
  73	u32 qc_flags; /* GFS2_QCF_... */
  74	u32 qc_id;
  75};
  76
  77static LIST_HEAD(qd_lru_list);
  78static atomic_t qd_lru_count = ATOMIC_INIT(0);
  79static DEFINE_SPINLOCK(qd_lru_lock);
 
 
  80
  81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
  82{
  83	struct gfs2_quota_data *qd;
  84	struct gfs2_sbd *sdp;
  85	int nr_to_scan = sc->nr_to_scan;
  86
  87	if (nr_to_scan == 0)
  88		goto out;
  89
  90	if (!(sc->gfp_mask & __GFP_FS))
  91		return -1;
  92
  93	spin_lock(&qd_lru_lock);
  94	while (nr_to_scan && !list_empty(&qd_lru_list)) {
  95		qd = list_entry(qd_lru_list.next,
  96				struct gfs2_quota_data, qd_reclaim);
  97		sdp = qd->qd_gl->gl_sbd;
  98
  99		/* Free from the filesystem-specific list */
 
 100		list_del(&qd->qd_list);
 
 
 
 
 
 101
 102		gfs2_assert_warn(sdp, !qd->qd_change);
 103		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 104		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 105
 106		gfs2_glock_put(qd->qd_gl);
 107		atomic_dec(&sdp->sd_quota_count);
 108
 109		/* Delete it from the common reclaim list */
 110		list_del_init(&qd->qd_reclaim);
 111		atomic_dec(&qd_lru_count);
 112		spin_unlock(&qd_lru_lock);
 113		kmem_cache_free(gfs2_quotad_cachep, qd);
 114		spin_lock(&qd_lru_lock);
 115		nr_to_scan--;
 116	}
 117	spin_unlock(&qd_lru_lock);
 118
 119out:
 120	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 121}
 122
 123static u64 qd2offset(struct gfs2_quota_data *qd)
 124{
 125	u64 offset;
 126
 127	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
 128	offset *= sizeof(struct gfs2_quota);
 129
 130	return offset;
 131}
 132
 133static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
 134		    struct gfs2_quota_data **qdp)
 135{
 136	struct gfs2_quota_data *qd;
 137	int error;
 138
 139	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 140	if (!qd)
 141		return -ENOMEM;
 142
 143	atomic_set(&qd->qd_count, 1);
 144	qd->qd_id = id;
 145	if (user)
 146		set_bit(QDF_USER, &qd->qd_flags);
 147	qd->qd_slot = -1;
 148	INIT_LIST_HEAD(&qd->qd_reclaim);
 
 149
 150	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
 151			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 152	if (error)
 153		goto fail;
 154
 155	*qdp = qd;
 156
 157	return 0;
 158
 159fail:
 160	kmem_cache_free(gfs2_quotad_cachep, qd);
 161	return error;
 162}
 163
 164static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 165		  struct gfs2_quota_data **qdp)
 
 166{
 167	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
 168	int error, found;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 169
 170	*qdp = NULL;
 171
 172	for (;;) {
 173		found = 0;
 174		spin_lock(&qd_lru_lock);
 175		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 176			if (qd->qd_id == id &&
 177			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
 178				if (!atomic_read(&qd->qd_count) &&
 179				    !list_empty(&qd->qd_reclaim)) {
 180					/* Remove it from reclaim list */
 181					list_del_init(&qd->qd_reclaim);
 182					atomic_dec(&qd_lru_count);
 183				}
 184				atomic_inc(&qd->qd_count);
 185				found = 1;
 186				break;
 187			}
 188		}
 189
 190		if (!found)
 191			qd = NULL;
 192
 193		if (!qd && new_qd) {
 194			qd = new_qd;
 195			list_add(&qd->qd_list, &sdp->sd_quota_list);
 196			atomic_inc(&sdp->sd_quota_count);
 197			new_qd = NULL;
 198		}
 199
 200		spin_unlock(&qd_lru_lock);
 201
 202		if (qd) {
 203			if (new_qd) {
 204				gfs2_glock_put(new_qd->qd_gl);
 205				kmem_cache_free(gfs2_quotad_cachep, new_qd);
 206			}
 207			*qdp = qd;
 208			return 0;
 209		}
 210
 211		error = qd_alloc(sdp, user, id, &new_qd);
 212		if (error)
 213			return error;
 214	}
 215}
 216
 217static void qd_hold(struct gfs2_quota_data *qd)
 218{
 219	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 220	gfs2_assert(sdp, atomic_read(&qd->qd_count));
 221	atomic_inc(&qd->qd_count);
 222}
 223
 224static void qd_put(struct gfs2_quota_data *qd)
 225{
 226	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
 227		/* Add to the reclaim list */
 228		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
 229		atomic_inc(&qd_lru_count);
 230		spin_unlock(&qd_lru_lock);
 231	}
 232}
 233
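/*
 * Reserve this ID's slot in the per-sb quota change bitmap.  The first
 * clear bit across the sd_quota_chunks pages of sd_quota_bitmap becomes
 * qd->qd_slot; later callers just bump qd_slot_count.  Returns -ENOSPC
 * when every slot in the quota change file is taken.
 */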
 234static int slot_get(struct gfs2_quota_data *qd)
 235{
 236	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 237	unsigned int c, o = 0, b;
 238	unsigned char byte = 0;
 239
 240	spin_lock(&qd_lru_lock);
 241
 242	if (qd->qd_slot_count++) {
 243		spin_unlock(&qd_lru_lock);
 244		return 0;
 245	}
 246
 247	for (c = 0; c < sdp->sd_quota_chunks; c++)
 248		for (o = 0; o < PAGE_SIZE; o++) {
 249			byte = sdp->sd_quota_bitmap[c][o];
 250			if (byte != 0xFF)
 251				goto found;
 252		}
 253
 254	goto fail;
 255
 256found:
 257	for (b = 0; b < 8; b++)
 258		if (!(byte & (1 << b)))
 259			break;
 260	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
 261
 262	if (qd->qd_slot >= sdp->sd_quota_slots)
 263		goto fail;
 264
 265	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 266
 267	spin_unlock(&qd_lru_lock);
 268
 269	return 0;
 270
 271fail:
 272	qd->qd_slot_count--;
 273	spin_unlock(&qd_lru_lock);
 274	return -ENOSPC;
 275}
 276
 277static void slot_hold(struct gfs2_quota_data *qd)
 278{
 279	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 280
 281	spin_lock(&qd_lru_lock);
 282	gfs2_assert(sdp, qd->qd_slot_count);
 283	qd->qd_slot_count++;
 284	spin_unlock(&qd_lru_lock);
 285}
 286
 287static void slot_put(struct gfs2_quota_data *qd)
 288{
 289	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 290
 291	spin_lock(&qd_lru_lock);
 292	gfs2_assert(sdp, qd->qd_slot_count);
 293	if (!--qd->qd_slot_count) {
 294		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 295		qd->qd_slot = -1;
 296	}
 297	spin_unlock(&qd_lru_lock);
 298}
 299
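/*
 * Read and pin the quota change file block that holds this ID's slot.
 * qd_slot is converted to a block number and offset within sd_qc_inode,
 * the block is read, and qd_bh/qd_bh_qc are cached.  Calls nest via
 * qd_bh_count; the buffer is released by the matching last bh_put().
 */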
 300static int bh_get(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 303	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 304	unsigned int block, offset;
 305	struct buffer_head *bh;
 306	int error;
 307	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 308
 309	mutex_lock(&sdp->sd_quota_mutex);
 310
 311	if (qd->qd_bh_count++) {
 312		mutex_unlock(&sdp->sd_quota_mutex);
 313		return 0;
 314	}
 315
 316	block = qd->qd_slot / sdp->sd_qc_per_block;
 317	offset = qd->qd_slot % sdp->sd_qc_per_block;
 318
 319	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 320	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 321	if (error)
 322		goto fail;
 323	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
 324	if (error)
 325		goto fail;
 326	error = -EIO;
 327	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 328		goto fail_brelse;
 329
 330	qd->qd_bh = bh;
 331	qd->qd_bh_qc = (struct gfs2_quota_change *)
 332		(bh->b_data + sizeof(struct gfs2_meta_header) +
 333		 offset * sizeof(struct gfs2_quota_change));
 334
 335	mutex_unlock(&sdp->sd_quota_mutex);
 336
 337	return 0;
 338
 339fail_brelse:
 340	brelse(bh);
 341fail:
 342	qd->qd_bh_count--;
 343	mutex_unlock(&sdp->sd_quota_mutex);
 344	return error;
 345}
 346
 347static void bh_put(struct gfs2_quota_data *qd)
 348{
 349	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 350
 351	mutex_lock(&sdp->sd_quota_mutex);
 352	gfs2_assert(sdp, qd->qd_bh_count);
 353	if (!--qd->qd_bh_count) {
 354		brelse(qd->qd_bh);
 355		qd->qd_bh = NULL;
 356		qd->qd_bh_qc = NULL;
 357	}
 358	mutex_unlock(&sdp->sd_quota_mutex);
 359}
 360
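/*
 * Pick the next quota_data with a pending local change: QDF_CHANGE set,
 * not already locked for syncing, and not yet written in the current
 * sd_quota_sync_gen generation.  The entry is marked QDF_LOCKED, given
 * the extra qd/slot references the sync needs, and its change block is
 * read.  gfs2_quota_sync() uses this to build batches for do_sync().
 */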
 361static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 362{
 363	struct gfs2_quota_data *qd = NULL;
 364	int error;
 365	int found = 0;
 366
 367	*qdp = NULL;
 368
 369	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 370		return 0;
 371
 372	spin_lock(&qd_lru_lock);
 373
 374	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 375		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 376		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 377		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
 378			continue;
 379
 380		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 381
 382		set_bit(QDF_LOCKED, &qd->qd_flags);
 383		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 384		atomic_inc(&qd->qd_count);
 385		qd->qd_change_sync = qd->qd_change;
 386		gfs2_assert_warn(sdp, qd->qd_slot_count);
 387		qd->qd_slot_count++;
 388		found = 1;
 389
 390		break;
 391	}
 392
 393	if (!found)
 394		qd = NULL;
 395
 396	spin_unlock(&qd_lru_lock);
 397
 398	if (qd) {
 399		gfs2_assert_warn(sdp, qd->qd_change_sync);
 400		error = bh_get(qd);
 401		if (error) {
 402			clear_bit(QDF_LOCKED, &qd->qd_flags);
 403			slot_put(qd);
 404			qd_put(qd);
 405			return error;
 406		}
 407	}
 408
 409	*qdp = qd;
 410
 411	return 0;
 412}
 413
 414static int qd_trylock(struct gfs2_quota_data *qd)
 415{
 416	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 417
 418	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 419		return 0;
 420
 421	spin_lock(&qd_lru_lock);
 422
 423	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 424	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 425		spin_unlock(&qd_lru_lock);
 426		return 0;
 427	}
 428
 429	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 430
 431	set_bit(QDF_LOCKED, &qd->qd_flags);
 432	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 433	atomic_inc(&qd->qd_count);
 434	qd->qd_change_sync = qd->qd_change;
 435	gfs2_assert_warn(sdp, qd->qd_slot_count);
 436	qd->qd_slot_count++;
 437
 438	spin_unlock(&qd_lru_lock);
 439
 440	gfs2_assert_warn(sdp, qd->qd_change_sync);
 441	if (bh_get(qd)) {
 442		clear_bit(QDF_LOCKED, &qd->qd_flags);
 443		slot_put(qd);
 444		qd_put(qd);
 445		return 0;
 446	}
 447
 448	return 1;
 449}
 450
 451static void qd_unlock(struct gfs2_quota_data *qd)
 452{
 453	gfs2_assert_warn(qd->qd_gl->gl_sbd,
 454			 test_bit(QDF_LOCKED, &qd->qd_flags));
 455	clear_bit(QDF_LOCKED, &qd->qd_flags);
 456	bh_put(qd);
 457	slot_put(qd);
 458	qd_put(qd);
 459}
 460
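/*
 * Take the full set of references needed before charging changes to an
 * ID: the in-core quota_data, a quota change slot, and the buffer that
 * holds its change record.  qdsb_put() drops them in reverse order.
 */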
 461static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
 462		    struct gfs2_quota_data **qdp)
 463{
 464	int error;
 465
 466	error = qd_get(sdp, user, id, qdp);
 467	if (error)
 468		return error;
 469
 470	error = slot_get(*qdp);
 471	if (error)
 472		goto fail;
 473
 474	error = bh_get(*qdp);
 475	if (error)
 476		goto fail_slot;
 477
 478	return 0;
 479
 480fail_slot:
 481	slot_put(*qdp);
 482fail:
 483	qd_put(*qdp);
 484	return error;
 485}
 486
 487static void qdsb_put(struct gfs2_quota_data *qd)
 488{
 489	bh_put(qd);
 490	slot_put(qd);
 491	qd_put(qd);
 492}
 493
 494int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 495{
 496	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 497	struct gfs2_qadata *qa = ip->i_qadata;
 498	struct gfs2_quota_data **qd = qa->qa_qd;
 499	int error;
 500
 501	if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
 502	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 503		return -EIO;
 504
 505	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 506		return 0;
 507
 508	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
 509	if (error)
 510		goto out;
 511	qa->qa_qd_num++;
 512	qd++;
 513
 514	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
 515	if (error)
 516		goto out;
 517	qa->qa_qd_num++;
 518	qd++;
 519
 520	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
 521		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
 522		if (error)
 523			goto out;
 524		qa->qa_qd_num++;
 525		qd++;
 526	}
 527
 528	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
 529		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
 530		if (error)
 531			goto out;
 532		qa->qa_qd_num++;
 533		qd++;
 534	}
 535
 536out:
 537	if (error)
 538		gfs2_quota_unhold(ip);
 539	return error;
 540}
 541
 542void gfs2_quota_unhold(struct gfs2_inode *ip)
 543{
 544	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 545	struct gfs2_qadata *qa = ip->i_qadata;
 546	unsigned int x;
 547
 548	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 549
 550	for (x = 0; x < qa->qa_qd_num; x++) {
 551		qdsb_put(qa->qa_qd[x]);
 552		qa->qa_qd[x] = NULL;
 553	}
 554	qa->qa_qd_num = 0;
 555}
 556
 557static int sort_qd(const void *a, const void *b)
 558{
 559	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 560	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 561
 562	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
 563	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
 564		if (test_bit(QDF_USER, &qd_a->qd_flags))
 565			return -1;
 566		else
 567			return 1;
 568	}
 569	if (qd_a->qd_id < qd_b->qd_id)
 570		return -1;
 571	if (qd_a->qd_id > qd_b->qd_id)
 572		return 1;
 573
 574	return 0;
 575}
 576
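/*
 * Record a local allocation change against one ID in this node's quota
 * change file and mirror the new total in qd->qd_change.  When the
 * accumulated change first becomes non-zero the entry gains QDF_CHANGE
 * plus extra qd/slot references; when it drops back to zero those are
 * released and the slot can be reused.
 */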
 577static void do_qc(struct gfs2_quota_data *qd, s64 change)
 578{
 579	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 580	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 581	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 582	s64 x;
 583
 584	mutex_lock(&sdp->sd_quota_mutex);
 585	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
 586
 587	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 588		qc->qc_change = 0;
 589		qc->qc_flags = 0;
 590		if (test_bit(QDF_USER, &qd->qd_flags))
 591			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 592		qc->qc_id = cpu_to_be32(qd->qd_id);
 593	}
 594
 595	x = be64_to_cpu(qc->qc_change) + change;
 596	qc->qc_change = cpu_to_be64(x);
 597
 598	spin_lock(&qd_lru_lock);
 599	qd->qd_change = x;
 600	spin_unlock(&qd_lru_lock);
 601
 602	if (!x) {
 603		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 604		clear_bit(QDF_CHANGE, &qd->qd_flags);
 605		qc->qc_flags = 0;
 606		qc->qc_id = 0;
 607		slot_put(qd);
 608		qd_put(qd);
 609	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 610		qd_hold(qd);
 611		slot_hold(qd);
 612	}
 613
 614	mutex_unlock(&sdp->sd_quota_mutex);
 615}
 616
 617/**
 618 * gfs2_adjust_quota - adjust record of current block usage
 619 * @ip: The quota inode
 620 * @loc: Offset of the entry in the quota file
 621 * @change: The amount of usage change to record
 622 * @qd: The quota data
 623 * @fdq: The updated limits to record
 624 *
 625 * This function was mostly borrowed from gfs2_block_truncate_page which was
 626 * in turn mostly borrowed from ext3
 627 *
 628 * Returns: 0 or -ve on error
 629 */
 630
 631static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 632			     s64 change, struct gfs2_quota_data *qd,
 633			     struct fs_disk_quota *fdq)
 634{
 635	struct inode *inode = &ip->i_inode;
 636	struct gfs2_sbd *sdp = GFS2_SB(inode);
 637	struct address_space *mapping = inode->i_mapping;
 638	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 639	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 640	unsigned blocksize, iblock, pos;
 641	struct buffer_head *bh;
 642	struct page *page;
 643	void *kaddr, *ptr;
 644	struct gfs2_quota q, *qp;
 645	int err, nbytes;
 646	u64 size;
 647
 648	if (gfs2_is_stuffed(ip)) {
 649		err = gfs2_unstuff_dinode(ip, NULL);
 650		if (err)
 651			return err;
 652	}
 653
 654	memset(&q, 0, sizeof(struct gfs2_quota));
 655	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 656	if (err < 0)
 657		return err;
 658
 659	err = -EIO;
 660	qp = &q;
 661	qp->qu_value = be64_to_cpu(qp->qu_value);
 662	qp->qu_value += change;
 663	qp->qu_value = cpu_to_be64(qp->qu_value);
 664	qd->qd_qb.qb_value = qp->qu_value;
 665	if (fdq) {
 666		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
 667			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
 668			qd->qd_qb.qb_warn = qp->qu_warn;
 669		}
 670		if (fdq->d_fieldmask & FS_DQ_BHARD) {
 671			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
 672			qd->qd_qb.qb_limit = qp->qu_limit;
 673		}
 674		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
 675			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
 676			qd->qd_qb.qb_value = qp->qu_value;
 677		}
 678	}
 679
 680	/* Write the quota into the quota file on disk */
 681	ptr = qp;
 682	nbytes = sizeof(struct gfs2_quota);
 683get_a_page:
 684	page = find_or_create_page(mapping, index, GFP_NOFS);
 685	if (!page)
 686		return -ENOMEM;
 687
 688	blocksize = inode->i_sb->s_blocksize;
 689	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 690
 691	if (!page_has_buffers(page))
 692		create_empty_buffers(page, blocksize, 0);
 693
 694	bh = page_buffers(page);
 695	pos = blocksize;
 696	while (offset >= pos) {
 697		bh = bh->b_this_page;
 698		iblock++;
 699		pos += blocksize;
 700	}
 701
 702	if (!buffer_mapped(bh)) {
 703		gfs2_block_map(inode, iblock, bh, 1);
 704		if (!buffer_mapped(bh))
 705			goto unlock_out;
 706		/* If it's a newly allocated disk block for quota, zero it */
 707		if (buffer_new(bh))
 708			zero_user(page, pos - blocksize, bh->b_size);
 709	}
 710
 711	if (PageUptodate(page))
 712		set_buffer_uptodate(bh);
 713
 714	if (!buffer_uptodate(bh)) {
 715		ll_rw_block(READ | REQ_META, 1, &bh);
 716		wait_on_buffer(bh);
 717		if (!buffer_uptodate(bh))
 718			goto unlock_out;
 719	}
 720
 721	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 722
 723	kaddr = kmap_atomic(page);
 724	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 725		nbytes = PAGE_CACHE_SIZE - offset;
 726	memcpy(kaddr + offset, ptr, nbytes);
 727	flush_dcache_page(page);
 728	kunmap_atomic(kaddr);
 729	unlock_page(page);
 730	page_cache_release(page);
 731
 732	/* If quota straddles page boundary, we need to update the rest of the
 733	 * quota at the beginning of the next page */
 734	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
 735		ptr = ptr + nbytes;
 736		nbytes = sizeof(struct gfs2_quota) - nbytes;
 737		offset = 0;
 738		index++;
 739		goto get_a_page;
 740	}
 741
 742	size = loc + sizeof(struct gfs2_quota);
 743	if (size > inode->i_size)
 744		i_size_write(inode, size);
 745	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 746	mark_inode_dirty(inode);
 747	return 0;
 748
 749unlock_out:
 750	unlock_page(page);
 751	page_cache_release(page);
 752	return err;
 753}
 754
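/*
 * Flush a batch of locally accumulated changes into the global quota
 * file.  The quota glocks are taken exclusively in sorted order along
 * with the quota inode, blocks are reserved for the worst case
 * (unstuffing, records straddling a page boundary), each record is
 * updated with gfs2_adjust_quota(), and the local change is cancelled
 * with do_qc(qd, -qd->qd_change_sync).
 */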
 755static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 756{
 757	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 758	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 759	unsigned int data_blocks, ind_blocks;
 760	struct gfs2_holder *ghs, i_gh;
 761	unsigned int qx, x;
 762	struct gfs2_quota_data *qd;
 763	loff_t offset;
 764	unsigned int nalloc = 0, blocks;
 765	int error;
 766
 767	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 768			      &data_blocks, &ind_blocks);
 769
 770	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 771	if (!ghs)
 772		return -ENOMEM;
 773
 774	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 775	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 776	for (qx = 0; qx < num_qd; qx++) {
 777		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 778					   GL_NOCACHE, &ghs[qx]);
 779		if (error)
 780			goto out;
 781	}
 782
 783	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 784	if (error)
 785		goto out;
 786
 787	for (x = 0; x < num_qd; x++) {
 788		offset = qd2offset(qda[x]);
 789		if (gfs2_write_alloc_required(ip, offset,
 790					      sizeof(struct gfs2_quota)))
 791			nalloc++;
 792	}
 793
 794	/*
 795	 * 1 blk for unstuffing inode if stuffed.  We add this extra block
 796	 * to the reservation unconditionally.  If the inode doesn't need
 797	 * unstuffing, the block will be released to the rgrp since it won't
 798	 * be allocated during the transaction.  The +3 at the end covers the
 799	 * unstuffing block, the inode size update block and another block
 800	 * in case the quota straddles a page boundary and two blocks need
 801	 * to be updated instead of one.
 802	 */
 803	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 804
 805	error = gfs2_inplace_reserve(ip, 1 +
 806				     (nalloc * (data_blocks + ind_blocks)));
 807	if (error)
 808		goto out_alloc;
 809
 810	if (nalloc)
 811		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
 812
 813	error = gfs2_trans_begin(sdp, blocks, 0);
 814	if (error)
 815		goto out_ipres;
 816
 817	for (x = 0; x < num_qd; x++) {
 818		qd = qda[x];
 819		offset = qd2offset(qd);
 820		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 821		if (error)
 822			goto out_end_trans;
 823
 824		do_qc(qd, -qd->qd_change_sync);
 825		set_bit(QDF_REFRESH, &qd->qd_flags);
 826	}
 827
 828	error = 0;
 829
 830out_end_trans:
 831	gfs2_trans_end(sdp);
 832out_ipres:
 833	gfs2_inplace_release(ip);
 834out_alloc:
 835	gfs2_glock_dq_uninit(&i_gh);
 836out:
 837	while (qx--)
 838		gfs2_glock_dq_uninit(&ghs[qx]);
 839	mutex_unlock(&ip->i_inode.i_mutex);
 840	kfree(ghs);
 841	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 842	return error;
 843}
 844
 845static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 846{
 847	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 848	struct gfs2_quota q;
 849	struct gfs2_quota_lvb *qlvb;
 850	loff_t pos;
 851	int error;
 852
 853	memset(&q, 0, sizeof(struct gfs2_quota));
 854	pos = qd2offset(qd);
 855	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 856	if (error < 0)
 857		return error;
 858
 859	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 860	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 861	qlvb->__pad = 0;
 862	qlvb->qb_limit = q.qu_limit;
 863	qlvb->qb_warn = q.qu_warn;
 864	qlvb->qb_value = q.qu_value;
 865	qd->qd_qb = *qlvb;
 866
 867	return 0;
 868}
 869
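/*
 * Acquire the glock covering one ID's quota and make sure its Lock Value
 * Block holds valid data.  If the LVB magic is wrong or a refresh is
 * forced, the lock is retaken exclusively, update_qd() rereads the
 * record from the quota file into the LVB, and the shared lock is then
 * reacquired.
 */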
 870static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 871		    struct gfs2_holder *q_gh)
 872{
 873	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 874	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 875	struct gfs2_holder i_gh;
 876	int error;
 877
 878restart:
 879	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 880	if (error)
 881		return error;
 882
 883	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 884
 885	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 886		gfs2_glock_dq_uninit(q_gh);
 887		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 888					   GL_NOCACHE, q_gh);
 889		if (error)
 890			return error;
 891
 892		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 893		if (error)
 894			goto fail;
 895
 896		error = update_qd(sdp, qd);
 897		if (error)
 898			goto fail_gunlock;
 899
 900		gfs2_glock_dq_uninit(&i_gh);
 901		gfs2_glock_dq_uninit(q_gh);
 902		force_refresh = 0;
 903		goto restart;
 904	}
 905
 906	return 0;
 907
 908fail_gunlock:
 909	gfs2_glock_dq_uninit(&i_gh);
 910fail:
 911	gfs2_glock_dq_uninit(q_gh);
 912	return error;
 913}
 914
 915int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
 916{
 917	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 918	struct gfs2_qadata *qa = ip->i_qadata;
 919	struct gfs2_quota_data *qd;
 920	unsigned int x;
 921	int error = 0;
 922
 923	error = gfs2_quota_hold(ip, uid, gid);
 924	if (error)
 925		return error;
 926
 927	if (capable(CAP_SYS_RESOURCE) ||
 928	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 929		return 0;
 930
 931	sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
 932	     sort_qd, NULL);
 933
 934	for (x = 0; x < qa->qa_qd_num; x++) {
 935		int force = NO_FORCE;
 936		qd = qa->qa_qd[x];
 937		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 938			force = FORCE;
 939		error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
 940		if (error)
 941			break;
 942	}
 943
 944	if (!error)
 945		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 946	else {
 947		while (x--)
 948			gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
 949		gfs2_quota_unhold(ip);
 950	}
 951
 952	return error;
 953}
 954
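/*
 * Decide whether this node's cached change is big enough to warrant an
 * early sync.  The local change is scaled by the number of journals
 * (assuming every node could have a similar change pending) and by the
 * quota_scale tunable; if that, added to the globally known usage, would
 * reach the hard limit, a sync is requested.  Negative changes, missing
 * limits and already-exceeded limits never trigger a sync here.
 */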
 955static int need_sync(struct gfs2_quota_data *qd)
 956{
 957	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 958	struct gfs2_tune *gt = &sdp->sd_tune;
 959	s64 value;
 960	unsigned int num, den;
 961	int do_sync = 1;
 962
 963	if (!qd->qd_qb.qb_limit)
 964		return 0;
 965
 966	spin_lock(&qd_lru_lock);
 967	value = qd->qd_change;
 968	spin_unlock(&qd_lru_lock);
 969
 970	spin_lock(&gt->gt_spin);
 971	num = gt->gt_quota_scale_num;
 972	den = gt->gt_quota_scale_den;
 973	spin_unlock(&gt->gt_spin);
 974
 975	if (value < 0)
 976		do_sync = 0;
 977	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
 978		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 979		do_sync = 0;
 980	else {
 981		value *= gfs2_jindex_size(sdp) * num;
 982		value = div_s64(value, den);
 983		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
 984		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 985			do_sync = 0;
 986	}
 987
 988	return do_sync;
 989}
 990
 991void gfs2_quota_unlock(struct gfs2_inode *ip)
 992{
 993	struct gfs2_qadata *qa = ip->i_qadata;
 994	struct gfs2_quota_data *qda[4];
 995	unsigned int count = 0;
 996	unsigned int x;
 997
 998	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
 999		goto out;
1000
1001	for (x = 0; x < qa->qa_qd_num; x++) {
1002		struct gfs2_quota_data *qd;
1003		int sync;
1004
1005		qd = qa->qa_qd[x];
1006		sync = need_sync(qd);
1007
1008		gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
1009
1010		if (sync && qd_trylock(qd))
1011			qda[count++] = qd;
1012	}
1013
1014	if (count) {
1015		do_sync(count, qda);
1016		for (x = 0; x < count; x++)
1017			qd_unlock(qda[x]);
1018	}
1019
1020out:
1021	gfs2_quota_unhold(ip);
1022}
1023
1024#define MAX_LINE 256
1025
1026static int print_message(struct gfs2_quota_data *qd, char *type)
1027{
1028	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
1029
1030	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
1031	       sdp->sd_fsname, type,
1032	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1033	       qd->qd_id);
1034
1035	return 0;
1036}
1037
1038int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039{
1040	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1041	struct gfs2_qadata *qa = ip->i_qadata;
1042	struct gfs2_quota_data *qd;
1043	s64 value;
1044	unsigned int x;
1045	int error = 0;
1046
1047	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1048		return 0;
1049
1050	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1051		return 0;
1052
1053	for (x = 0; x < qa->qa_qd_num; x++) {
1054		qd = qa->qa_qd[x];
1055
1056		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1057		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
1058			continue;
1059
1060		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1061		spin_lock(&qd_lru_lock);
1062		value += qd->qd_change;
1063		spin_unlock(&qd_lru_lock);
1064
1065		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1066			print_message(qd, "exceeded");
1067			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1068					   USRQUOTA : GRPQUOTA, qd->qd_id,
1069					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1070
1071			error = -EDQUOT;
1072			break;
1073		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1074			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
1075			   time_after_eq(jiffies, qd->qd_last_warn +
1076					 gfs2_tune_get(sdp,
1077						gt_quota_warn_period) * HZ)) {
1078			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1079					   USRQUOTA : GRPQUOTA, qd->qd_id,
1080					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1081			error = print_message(qd, "warning");
1082			qd->qd_last_warn = jiffies;
1083		}
1084	}
1085
1086	return error;
1087}
1088
1089void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1090		       u32 uid, u32 gid)
1091{
1092	struct gfs2_qadata *qa = ip->i_qadata;
1093	struct gfs2_quota_data *qd;
1094	unsigned int x;
1095
1096	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1097		return;
1098	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1099		return;
1100
1101	for (x = 0; x < qa->qa_qd_num; x++) {
1102		qd = qa->qa_qd[x];
1103
1104		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1105		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
1106			do_qc(qd, change);
1107		}
1108	}
1109}
1110
1111int gfs2_quota_sync(struct super_block *sb, int type, int wait)
1112{
1113	struct gfs2_sbd *sdp = sb->s_fs_info;
1114	struct gfs2_quota_data **qda;
1115	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1116	unsigned int num_qd;
1117	unsigned int x;
1118	int error = 0;
1119
1120	sdp->sd_quota_sync_gen++;
1121
1122	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1123	if (!qda)
1124		return -ENOMEM;
1125
1126	do {
1127		num_qd = 0;
1128
1129		for (;;) {
1130			error = qd_fish(sdp, qda + num_qd);
1131			if (error || !qda[num_qd])
1132				break;
1133			if (++num_qd == max_qd)
1134				break;
1135		}
1136
1137		if (num_qd) {
1138			if (!error)
1139				error = do_sync(num_qd, qda);
1140			if (!error)
1141				for (x = 0; x < num_qd; x++)
1142					qda[x]->qd_sync_gen =
1143						sdp->sd_quota_sync_gen;
1144
1145			for (x = 0; x < num_qd; x++)
1146				qd_unlock(qda[x]);
1147		}
1148	} while (!error && num_qd == max_qd);
1149
1150	kfree(qda);
1151
1152	return error;
1153}
1154
1155static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
1156{
1157	return gfs2_quota_sync(sb, type, 0);
1158}
1159
1160int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1161{
1162	struct gfs2_quota_data *qd;
1163	struct gfs2_holder q_gh;
1164	int error;
1165
1166	error = qd_get(sdp, user, id, &qd);
1167	if (error)
1168		return error;
1169
1170	error = do_glock(qd, FORCE, &q_gh);
1171	if (!error)
1172		gfs2_glock_dq_uninit(&q_gh);
1173
1174	qd_put(qd);
1175	return error;
1176}
1177
1178static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1179{
1180	const struct gfs2_quota_change *str = buf;
1181
1182	qc->qc_change = be64_to_cpu(str->qc_change);
1183	qc->qc_flags = be32_to_cpu(str->qc_flags);
1184	qc->qc_id = be32_to_cpu(str->qc_id);
1185}
1186
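/*
 * Mount-time scan of this node's quota change file.  The slot bitmap is
 * sized and allocated, then every on-disk gfs2_quota_change record is
 * examined: each non-zero change gets an in-core quota_data with
 * QDF_CHANGE set, its slot marked in the bitmap, and a place on
 * sd_quota_list so the change is eventually synced to the quota file.
 */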
1187int gfs2_quota_init(struct gfs2_sbd *sdp)
1188{
1189	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1190	u64 size = i_size_read(sdp->sd_qc_inode);
1191	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1192	unsigned int x, slot = 0;
1193	unsigned int found = 0;
1194	u64 dblock;
1195	u32 extlen = 0;
1196	int error;
1197
1198	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1199		return -EIO;
1200
1201	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1202	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1203
1204	error = -ENOMEM;
1205
1206	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1207				       sizeof(unsigned char *), GFP_NOFS);
1208	if (!sdp->sd_quota_bitmap)
1209		return error;
1210
1211	for (x = 0; x < sdp->sd_quota_chunks; x++) {
1212		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1213		if (!sdp->sd_quota_bitmap[x])
1214			goto fail;
1215	}
1216
1217	for (x = 0; x < blocks; x++) {
1218		struct buffer_head *bh;
1219		unsigned int y;
1220
1221		if (!extlen) {
1222			int new = 0;
1223			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1224			if (error)
1225				goto fail;
1226		}
1227		error = -EIO;
1228		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1229		if (!bh)
1230			goto fail;
1231		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1232			brelse(bh);
1233			goto fail;
1234		}
1235
1236		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1237		     y++, slot++) {
1238			struct gfs2_quota_change_host qc;
1239			struct gfs2_quota_data *qd;
1240
1241			gfs2_quota_change_in(&qc, bh->b_data +
1242					  sizeof(struct gfs2_meta_header) +
1243					  y * sizeof(struct gfs2_quota_change));
1244			if (!qc.qc_change)
1245				continue;
1246
1247			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1248					 qc.qc_id, &qd);
1249			if (error) {
1250				brelse(bh);
1251				goto fail;
1252			}
1253
1254			set_bit(QDF_CHANGE, &qd->qd_flags);
1255			qd->qd_change = qc.qc_change;
1256			qd->qd_slot = slot;
1257			qd->qd_slot_count = 1;
1258
1259			spin_lock(&qd_lru_lock);
1260			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1261			list_add(&qd->qd_list, &sdp->sd_quota_list);
1262			atomic_inc(&sdp->sd_quota_count);
1263			spin_unlock(&qd_lru_lock);
1264
1265			found++;
1266		}
1267
1268		brelse(bh);
1269		dblock++;
1270		extlen--;
1271	}
1272
1273	if (found)
1274		fs_info(sdp, "found %u quota changes\n", found);
1275
1276	return 0;
1277
1278fail:
1279	gfs2_quota_cleanup(sdp);
1280	return error;
1281}
1282
1283void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1284{
1285	struct list_head *head = &sdp->sd_quota_list;
1286	struct gfs2_quota_data *qd;
1287	unsigned int x;
1288
1289	spin_lock(&qd_lru_lock);
1290	while (!list_empty(head)) {
1291		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1292
1293		if (atomic_read(&qd->qd_count) > 1 ||
1294		    (atomic_read(&qd->qd_count) &&
1295		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1296			list_move(&qd->qd_list, head);
1297			spin_unlock(&qd_lru_lock);
1298			schedule();
1299			spin_lock(&qd_lru_lock);
1300			continue;
1301		}
1302
1303		list_del(&qd->qd_list);
1304		/* Also remove if this qd exists in the reclaim list */
1305		if (!list_empty(&qd->qd_reclaim)) {
1306			list_del_init(&qd->qd_reclaim);
1307			atomic_dec(&qd_lru_count);
1308		}
1309		atomic_dec(&sdp->sd_quota_count);
1310		spin_unlock(&qd_lru_lock);
1311
1312		if (!atomic_read(&qd->qd_count)) {
1313			gfs2_assert_warn(sdp, !qd->qd_change);
1314			gfs2_assert_warn(sdp, !qd->qd_slot_count);
1315		} else
1316			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1317		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1318
1319		gfs2_glock_put(qd->qd_gl);
1320		kmem_cache_free(gfs2_quotad_cachep, qd);
1321
1322		spin_lock(&qd_lru_lock);
1323	}
1324	spin_unlock(&qd_lru_lock);
1325
1326	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1327
1328	if (sdp->sd_quota_bitmap) {
1329		for (x = 0; x < sdp->sd_quota_chunks; x++)
1330			kfree(sdp->sd_quota_bitmap[x]);
1331		kfree(sdp->sd_quota_bitmap);
1332	}
1333}
1334
1335static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1336{
1337	if (error == 0 || error == -EROFS)
1338		return;
1339	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1340		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1341}
1342
1343static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1344			       int (*fxn)(struct super_block *sb, int type),
1345			       unsigned long t, unsigned long *timeo,
1346			       unsigned int *new_timeo)
1347{
1348	if (t >= *timeo) {
1349		int error = fxn(sdp->sd_vfs, 0);
1350		quotad_error(sdp, msg, error);
1351		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1352	} else {
1353		*timeo -= t;
1354	}
1355}
1356
1357static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1358{
1359	struct gfs2_inode *ip;
1360
1361	while (1) {
1362		ip = NULL;
1363		spin_lock(&sdp->sd_trunc_lock);
1364		if (!list_empty(&sdp->sd_trunc_list)) {
1365			ip = list_entry(sdp->sd_trunc_list.next,
1366					struct gfs2_inode, i_trunc_list);
1367			list_del_init(&ip->i_trunc_list);
1368		}
1369		spin_unlock(&sdp->sd_trunc_lock);
1370		if (ip == NULL)
1371			return;
1372		gfs2_glock_finish_truncate(ip);
1373	}
1374}
1375
1376void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1377	if (!sdp->sd_statfs_force_sync) {
1378		sdp->sd_statfs_force_sync = 1;
1379		wake_up(&sdp->sd_quota_wait);
1380	}
1381}
1382
1383
1384/**
1385 * gfs2_quotad - Write cached quota changes into the quota file
1386 * @sdp: Pointer to GFS2 superblock
1387 *
1388 */
1389
1390int gfs2_quotad(void *data)
1391{
1392	struct gfs2_sbd *sdp = data;
1393	struct gfs2_tune *tune = &sdp->sd_tune;
1394	unsigned long statfs_timeo = 0;
1395	unsigned long quotad_timeo = 0;
1396	unsigned long t = 0;
1397	DEFINE_WAIT(wait);
1398	int empty;
1399
1400	while (!kthread_should_stop()) {
1401
1402		/* Update the master statfs file */
1403		if (sdp->sd_statfs_force_sync) {
1404			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1405			quotad_error(sdp, "statfs", error);
1406			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1407		}
1408		else
1409			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1410				   	   &statfs_timeo,
1411					   &tune->gt_statfs_quantum);
1412
1413		/* Update quota file */
1414		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
1415				   &quotad_timeo, &tune->gt_quota_quantum);
1416
1417		/* Check for & recover partially truncated inodes */
1418		quotad_check_trunc_list(sdp);
1419
1420		try_to_freeze();
1421
1422		t = min(quotad_timeo, statfs_timeo);
1423
1424		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1425		spin_lock(&sdp->sd_trunc_lock);
1426		empty = list_empty(&sdp->sd_trunc_list);
1427		spin_unlock(&sdp->sd_trunc_lock);
1428		if (empty && !sdp->sd_statfs_force_sync)
1429			t -= schedule_timeout(t);
1430		else
1431			t = 0;
1432		finish_wait(&sdp->sd_quota_wait, &wait);
1433	}
1434
1435	return 0;
1436}
1437
1438static int gfs2_quota_get_xstate(struct super_block *sb,
1439				 struct fs_quota_stat *fqs)
1440{
1441	struct gfs2_sbd *sdp = sb->s_fs_info;
1442
1443	memset(fqs, 0, sizeof(struct fs_quota_stat));
1444	fqs->qs_version = FS_QSTAT_VERSION;
1445
1446	switch (sdp->sd_args.ar_quota) {
1447	case GFS2_QUOTA_ON:
1448		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
1449		/*FALLTHRU*/
1450	case GFS2_QUOTA_ACCOUNT:
1451		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
1452		break;
1453	case GFS2_QUOTA_OFF:
1454		break;
1455	}
1456
1457	if (sdp->sd_quota_inode) {
1458		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1459		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1460	}
1461	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
1462	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1463	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1464	return 0;
1465}
1466
1467static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1468			  struct fs_disk_quota *fdq)
1469{
1470	struct gfs2_sbd *sdp = sb->s_fs_info;
1471	struct gfs2_quota_lvb *qlvb;
1472	struct gfs2_quota_data *qd;
1473	struct gfs2_holder q_gh;
1474	int error;
1475
1476	memset(fdq, 0, sizeof(struct fs_disk_quota));
1477
1478	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1479		return -ESRCH; /* Crazy XFS error code */
1480
1481	if (type == USRQUOTA)
1482		type = QUOTA_USER;
1483	else if (type == GRPQUOTA)
1484		type = QUOTA_GROUP;
1485	else
1486		return -EINVAL;
1487
1488	error = qd_get(sdp, type, id, &qd);
1489	if (error)
1490		return error;
1491	error = do_glock(qd, FORCE, &q_gh);
1492	if (error)
1493		goto out;
1494
1495	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1496	fdq->d_version = FS_DQUOT_VERSION;
1497	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1498	fdq->d_id = id;
1499	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1500	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1501	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1502
1503	gfs2_glock_dq_uninit(&q_gh);
1504out:
1505	qd_put(qd);
1506	return error;
1507}
1508
1509/* GFS2 only supports a subset of the XFS fields */
1510#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1511
1512static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1513			  struct fs_disk_quota *fdq)
1514{
1515	struct gfs2_sbd *sdp = sb->s_fs_info;
1516	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1517	struct gfs2_quota_data *qd;
1518	struct gfs2_holder q_gh, i_gh;
1519	unsigned int data_blocks, ind_blocks;
1520	unsigned int blocks = 0;
1521	int alloc_required;
1522	loff_t offset;
1523	int error;
1524
1525	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1526		return -ESRCH; /* Crazy XFS error code */
1527
1528	switch(type) {
1529	case USRQUOTA:
1530		type = QUOTA_USER;
1531		if (fdq->d_flags != FS_USER_QUOTA)
1532			return -EINVAL;
1533		break;
1534	case GRPQUOTA:
1535		type = QUOTA_GROUP;
1536		if (fdq->d_flags != FS_GROUP_QUOTA)
1537			return -EINVAL;
1538		break;
1539	default:
1540		return -EINVAL;
1541	}
1542
1543	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1544		return -EINVAL;
1545	if (fdq->d_id != id)
1546		return -EINVAL;
1547
1548	error = qd_get(sdp, type, id, &qd);
1549	if (error)
1550		return error;
1551
1552	mutex_lock(&ip->i_inode.i_mutex);
1553	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1554	if (error)
1555		goto out_put;
1556	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1557	if (error)
1558		goto out_q;
1559
1560	/* Check for existing entry, if none then alloc new blocks */
1561	error = update_qd(sdp, qd);
1562	if (error)
1563		goto out_i;
1564
1565	/* If nothing has changed, this is a no-op */
1566	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1567	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1568		fdq->d_fieldmask ^= FS_DQ_BSOFT;
1569
1570	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1571	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1572		fdq->d_fieldmask ^= FS_DQ_BHARD;
1573
1574	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1575	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1576		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1577
1578	if (fdq->d_fieldmask == 0)
1579		goto out_i;
1580
1581	offset = qd2offset(qd);
1582	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1583	if (gfs2_is_stuffed(ip))
1584		alloc_required = 1;
1585	if (alloc_required) {
1586		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1587				       &data_blocks, &ind_blocks);
1588		blocks = 1 + data_blocks + ind_blocks;
1589		error = gfs2_inplace_reserve(ip, blocks);
1590		if (error)
1591			goto out_i;
1592		blocks += gfs2_rg_blocks(ip);
1593	}
1594
1595	/* Some quotas span block boundaries and can update two blocks,
1596	   adding an extra block to the transaction to handle such quotas */
1597	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1598	if (error)
1599		goto out_release;
1600
1601	/* Apply changes */
1602	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1603
1604	gfs2_trans_end(sdp);
1605out_release:
1606	if (alloc_required)
1607		gfs2_inplace_release(ip);
1608out_i:
1609	gfs2_glock_dq_uninit(&i_gh);
1610out_q:
1611	gfs2_glock_dq_uninit(&q_gh);
1612out_put:
1613	mutex_unlock(&ip->i_inode.i_mutex);
1614	qd_put(qd);
1615	return error;
1616}
1617
1618const struct quotactl_ops gfs2_quotactl_ops = {
1619	.quota_sync     = gfs2_quota_sync,
1620	.get_xstate     = gfs2_quota_get_xstate,
1621	.get_dqblk	= gfs2_get_dqblk,
1622	.set_dqblk	= gfs2_set_dqblk,
1623};