v4.6
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
  17 * multiple nodes to overrun their quota, but that overrun is controllable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
  21 * There are a couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
  24 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
  29 * (with infinitely many nodes with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
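/*
 * Rough sketch of the early-sync heuristic implemented by need_sync()
 * below (the numbers here are illustrative, not from the source): a
 * locally cached change forces a sync ahead of the quota_quantum timer
 * once
 *
 *     local_change * number_of_journals * quota_scale_num / quota_scale_den
 *         + last_synced_value  >=  hard_limit
 *
 * e.g. with a 1000-block limit, 900 blocks already synced, 3 journals and
 * the default scale of 1/1, a pending local change of 34 blocks is enough
 * (34 * 3 + 900 >= 1000) to trigger an early sync.
 */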
  38
  39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43#include <linux/mm.h>
  44#include <linux/spinlock.h>
  45#include <linux/completion.h>
  46#include <linux/buffer_head.h>
  47#include <linux/sort.h>
  48#include <linux/fs.h>
  49#include <linux/bio.h>
  50#include <linux/gfs2_ondisk.h>
  51#include <linux/kthread.h>
  52#include <linux/freezer.h>
  53#include <linux/quota.h>
  54#include <linux/dqblk_xfs.h>
  55#include <linux/lockref.h>
  56#include <linux/list_lru.h>
  57#include <linux/rcupdate.h>
  58#include <linux/rculist_bl.h>
  59#include <linux/bit_spinlock.h>
  60#include <linux/jhash.h>
  61#include <linux/vmalloc.h>
  62
  63#include "gfs2.h"
  64#include "incore.h"
  65#include "bmap.h"
  66#include "glock.h"
  67#include "glops.h"
  68#include "log.h"
  69#include "meta_io.h"
  70#include "quota.h"
  71#include "rgrp.h"
  72#include "super.h"
  73#include "trans.h"
  74#include "inode.h"
  75#include "util.h"
  76
  77#define GFS2_QD_HASH_SHIFT      12
  78#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
  79#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  80
  81/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  82/*                     -> sd_bitmap_lock                              */
  83static DEFINE_SPINLOCK(qd_lock);
  84struct list_lru gfs2_qd_lru;
  85
  86static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  87
  88static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  89				 const struct kqid qid)
  90{
  91	unsigned int h;
  92
  93	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  94	h = jhash(&qid, sizeof(struct kqid), h);
  95
  96	return h & GFS2_QD_HASH_MASK;
  97}
  98
  99static inline void spin_lock_bucket(unsigned int hash)
 100{
 101        hlist_bl_lock(&qd_hash_table[hash]);
 102}
 103
 104static inline void spin_unlock_bucket(unsigned int hash)
 105{
 106        hlist_bl_unlock(&qd_hash_table[hash]);
 107}
 108
 109static void gfs2_qd_dealloc(struct rcu_head *rcu)
 110{
 111	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 112	kmem_cache_free(gfs2_quotad_cachep, qd);
 113}
 114
 115static void gfs2_qd_dispose(struct list_head *list)
 116{
 117	struct gfs2_quota_data *qd;
 118	struct gfs2_sbd *sdp;
 119
 120	while (!list_empty(list)) {
 121		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
 122		sdp = qd->qd_gl->gl_name.ln_sbd;
 123
 124		list_del(&qd->qd_lru);
 125
 126		/* Free from the filesystem-specific list */
 127		spin_lock(&qd_lock);
 128		list_del(&qd->qd_list);
 129		spin_unlock(&qd_lock);
 130
 131		spin_lock_bucket(qd->qd_hash);
 132		hlist_bl_del_rcu(&qd->qd_hlist);
 133		spin_unlock_bucket(qd->qd_hash);
 134
 135		gfs2_assert_warn(sdp, !qd->qd_change);
 136		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 137		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 138
 139		gfs2_glock_put(qd->qd_gl);
 140		atomic_dec(&sdp->sd_quota_count);
 141
 142		/* Delete it from the common reclaim list */
 143		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 144	}
 145}
 146
 147
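/*
 * Unreferenced quota_data objects sit on the global gfs2_qd_lru list; the
 * shrinker callbacks below isolate entries whose reference count has
 * dropped to zero and hand them to gfs2_qd_dispose() for RCU freeing.
 */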
 148static enum lru_status gfs2_qd_isolate(struct list_head *item,
 149		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 150{
 151	struct list_head *dispose = arg;
 152	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 153
 154	if (!spin_trylock(&qd->qd_lockref.lock))
 155		return LRU_SKIP;
 156
 157	if (qd->qd_lockref.count == 0) {
 158		lockref_mark_dead(&qd->qd_lockref);
 159		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 160	}
 161
 162	spin_unlock(&qd->qd_lockref.lock);
 163	return LRU_REMOVED;
 164}
 165
 166static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 167					 struct shrink_control *sc)
 168{
 169	LIST_HEAD(dispose);
 170	unsigned long freed;
 171
 172	if (!(sc->gfp_mask & __GFP_FS))
 173		return SHRINK_STOP;
 174
 175	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 176				     gfs2_qd_isolate, &dispose);
 177
 178	gfs2_qd_dispose(&dispose);
 179
 180	return freed;
 181}
 182
 183static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 184					  struct shrink_control *sc)
 185{
 186	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 187}
 188
 189struct shrinker gfs2_qd_shrinker = {
 190	.count_objects = gfs2_qd_shrink_count,
 191	.scan_objects = gfs2_qd_shrink_scan,
 192	.seeks = DEFAULT_SEEKS,
 193	.flags = SHRINKER_NUMA_AWARE,
 194};
 195
 196
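/*
 * Layout of the shared quota file: user and group quotas interleave, so
 * numeric ID n lives at index 2*n (user) or 2*n + 1 (group), each entry
 * being one struct gfs2_quota.
 */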
 197static u64 qd2index(struct gfs2_quota_data *qd)
 198{
 199	struct kqid qid = qd->qd_id;
 200	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 201		((qid.type == USRQUOTA) ? 0 : 1);
 202}
 203
 204static u64 qd2offset(struct gfs2_quota_data *qd)
 205{
 206	u64 offset;
 207
 208	offset = qd2index(qd);
 209	offset *= sizeof(struct gfs2_quota);
 210
 211	return offset;
 212}
 213
 214static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 215{
 216	struct gfs2_quota_data *qd;
 217	int error;
 218
 219	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 220	if (!qd)
 221		return NULL;
 222
 223	qd->qd_sbd = sdp;
 224	qd->qd_lockref.count = 1;
 225	spin_lock_init(&qd->qd_lockref.lock);
 226	qd->qd_id = qid;
 227	qd->qd_slot = -1;
 228	INIT_LIST_HEAD(&qd->qd_lru);
 229	qd->qd_hash = hash;
 230
 231	error = gfs2_glock_get(sdp, qd2index(qd),
 232			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 233	if (error)
 234		goto fail;
 235
 236	return qd;
 237
 238fail:
 239	kmem_cache_free(gfs2_quotad_cachep, qd);
 240	return NULL;
 241}
 242
 243static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 244						     const struct gfs2_sbd *sdp,
 245						     struct kqid qid)
 246{
 247	struct gfs2_quota_data *qd;
 248	struct hlist_bl_node *h;
 249
 250	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 251		if (!qid_eq(qd->qd_id, qid))
 252			continue;
 253		if (qd->qd_sbd != sdp)
 254			continue;
 255		if (lockref_get_not_dead(&qd->qd_lockref)) {
 256			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 257			return qd;
 258		}
 259	}
 260
 261	return NULL;
 262}
 263
 264
 265static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 266		  struct gfs2_quota_data **qdp)
 267{
 268	struct gfs2_quota_data *qd, *new_qd;
 269	unsigned int hash = gfs2_qd_hash(sdp, qid);
 270
 271	rcu_read_lock();
 272	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 273	rcu_read_unlock();
 274
 275	if (qd)
 276		return 0;
 277
 278	new_qd = qd_alloc(hash, sdp, qid);
 279	if (!new_qd)
 280		return -ENOMEM;
 281
 282	spin_lock(&qd_lock);
 283	spin_lock_bucket(hash);
 284	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 285	if (qd == NULL) {
 286		*qdp = new_qd;
 287		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 288		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 289		atomic_inc(&sdp->sd_quota_count);
 290	}
 291	spin_unlock_bucket(hash);
 292	spin_unlock(&qd_lock);
 293
 294	if (qd) {
 295		gfs2_glock_put(new_qd->qd_gl);
 296		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 297	}
 298
 299	return 0;
 300}
 301
 302
 303static void qd_hold(struct gfs2_quota_data *qd)
 304{
 305	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 306	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 307	lockref_get(&qd->qd_lockref);
 308}
 309
 310static void qd_put(struct gfs2_quota_data *qd)
 311{
 312	if (lockref_put_or_lock(&qd->qd_lockref))
 313		return;
 314
 315	qd->qd_lockref.count = 0;
 316	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 317	spin_unlock(&qd->qd_lockref.lock);
 318
 319}
 320
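/*
 * Slot handling: a quota_data with a pending change owns one slot in this
 * node's quota_change file.  sd_quota_bitmap tracks which slots are in
 * use and qd_slot_count reference-counts the slot.
 */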
 321static int slot_get(struct gfs2_quota_data *qd)
 322{
 323	struct gfs2_sbd *sdp = qd->qd_sbd;
 324	unsigned int bit;
 325	int error = 0;
 326
 327	spin_lock(&sdp->sd_bitmap_lock);
 328	if (qd->qd_slot_count != 0)
 329		goto out;
 330
 331	error = -ENOSPC;
 332	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 333	if (bit < sdp->sd_quota_slots) {
 334		set_bit(bit, sdp->sd_quota_bitmap);
 335		qd->qd_slot = bit;
 336		error = 0;
 337out:
 338		qd->qd_slot_count++;
 339	}
 340	spin_unlock(&sdp->sd_bitmap_lock);
 341
 342	return error;
 343}
 344
 345static void slot_hold(struct gfs2_quota_data *qd)
 346{
 347	struct gfs2_sbd *sdp = qd->qd_sbd;
 348
 349	spin_lock(&sdp->sd_bitmap_lock);
 350	gfs2_assert(sdp, qd->qd_slot_count);
 351	qd->qd_slot_count++;
 352	spin_unlock(&sdp->sd_bitmap_lock);
 353}
 354
 355static void slot_put(struct gfs2_quota_data *qd)
 356{
 357	struct gfs2_sbd *sdp = qd->qd_sbd;
 358
 359	spin_lock(&sdp->sd_bitmap_lock);
 360	gfs2_assert(sdp, qd->qd_slot_count);
 361	if (!--qd->qd_slot_count) {
 362		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 363		qd->qd_slot = -1;
 364	}
 365	spin_unlock(&sdp->sd_bitmap_lock);
 366}
 367
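/*
 * bh_get()/bh_put() pin the buffer head of the quota_change file block
 * holding this qd's slot, so do_qc() can update the on-disk tag in place.
 */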
 368static int bh_get(struct gfs2_quota_data *qd)
 369{
 370	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 371	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 372	unsigned int block, offset;
 373	struct buffer_head *bh;
 374	int error;
 375	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 376
 377	mutex_lock(&sdp->sd_quota_mutex);
 378
 379	if (qd->qd_bh_count++) {
 380		mutex_unlock(&sdp->sd_quota_mutex);
 381		return 0;
 382	}
 383
 384	block = qd->qd_slot / sdp->sd_qc_per_block;
 385	offset = qd->qd_slot % sdp->sd_qc_per_block;
 386
 387	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 388	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 389	if (error)
 390		goto fail;
 391	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
 392	if (error)
 393		goto fail;
 394	error = -EIO;
 395	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 396		goto fail_brelse;
 397
 398	qd->qd_bh = bh;
 399	qd->qd_bh_qc = (struct gfs2_quota_change *)
 400		(bh->b_data + sizeof(struct gfs2_meta_header) +
 401		 offset * sizeof(struct gfs2_quota_change));
 402
 403	mutex_unlock(&sdp->sd_quota_mutex);
 404
 405	return 0;
 406
 407fail_brelse:
 408	brelse(bh);
 409fail:
 410	qd->qd_bh_count--;
 411	mutex_unlock(&sdp->sd_quota_mutex);
 412	return error;
 413}
 414
 415static void bh_put(struct gfs2_quota_data *qd)
 416{
 417	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 418
 419	mutex_lock(&sdp->sd_quota_mutex);
 420	gfs2_assert(sdp, qd->qd_bh_count);
 421	if (!--qd->qd_bh_count) {
 422		brelse(qd->qd_bh);
 423		qd->qd_bh = NULL;
 424		qd->qd_bh_qc = NULL;
 425	}
 426	mutex_unlock(&sdp->sd_quota_mutex);
 427}
 428
 429static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 430			 u64 *sync_gen)
 431{
 432	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 433	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 434	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 435		return 0;
 436
 437	if (!lockref_get_not_dead(&qd->qd_lockref))
 438		return 0;
 439
 440	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 441	set_bit(QDF_LOCKED, &qd->qd_flags);
 442	qd->qd_change_sync = qd->qd_change;
 443	slot_hold(qd);
 444	return 1;
 445}
 446
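/*
 * qd_fish() picks the next quota_data on sd_quota_list that still has
 * unsynced changes, marks it QDF_LOCKED and takes the references needed
 * to write it out (used by gfs2_quota_sync()).
 */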
 447static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 448{
 449	struct gfs2_quota_data *qd = NULL;
 450	int error;
 451	int found = 0;
 452
 453	*qdp = NULL;
 454
 455	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 456		return 0;
 457
 458	spin_lock(&qd_lock);
 459
 460	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 461		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
 462		if (found)
 463			break;
 464	}
 465
 466	if (!found)
 467		qd = NULL;
 468
 469	spin_unlock(&qd_lock);
 470
 471	if (qd) {
 472		gfs2_assert_warn(sdp, qd->qd_change_sync);
 473		error = bh_get(qd);
 474		if (error) {
 475			clear_bit(QDF_LOCKED, &qd->qd_flags);
 476			slot_put(qd);
 477			qd_put(qd);
 478			return error;
 479		}
 480	}
 481
 482	*qdp = qd;
 483
 484	return 0;
 485}
 486
 487static void qd_unlock(struct gfs2_quota_data *qd)
 488{
 489	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
 490			 test_bit(QDF_LOCKED, &qd->qd_flags));
 491	clear_bit(QDF_LOCKED, &qd->qd_flags);
 492	bh_put(qd);
 493	slot_put(qd);
 494	qd_put(qd);
 495}
 496
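/*
 * qdsb_get()/qdsb_put() bundle the three references an inode-side hold
 * needs: the quota_data itself, its change-file slot, and the buffer head
 * backing that slot.
 */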
 497static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 498		    struct gfs2_quota_data **qdp)
 499{
 500	int error;
 501
 502	error = qd_get(sdp, qid, qdp);
 503	if (error)
 504		return error;
 505
 506	error = slot_get(*qdp);
 507	if (error)
 508		goto fail;
 509
 510	error = bh_get(*qdp);
 511	if (error)
 512		goto fail_slot;
 513
 514	return 0;
 515
 516fail_slot:
 517	slot_put(*qdp);
 518fail:
 519	qd_put(*qdp);
 520	return error;
 521}
 522
 523static void qdsb_put(struct gfs2_quota_data *qd)
 524{
 525	bh_put(qd);
 526	slot_put(qd);
 527	qd_put(qd);
 528}
 529
 530/**
  531 * gfs2_qa_alloc - make sure we have a quota allocation data structure,
 532 *                 if necessary
 533 * @ip: the inode for this reservation
 534 */
 535int gfs2_qa_alloc(struct gfs2_inode *ip)
 536{
 537	int error = 0;
 538	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 539
 540	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 541		return 0;
 542
 543	down_write(&ip->i_rw_mutex);
 544	if (ip->i_qadata == NULL) {
 545		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 546		if (!ip->i_qadata)
 547			error = -ENOMEM;
 548	}
 549	up_write(&ip->i_rw_mutex);
 550	return error;
 551}
 552
 553void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
 554{
 555	down_write(&ip->i_rw_mutex);
 556	if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
 557		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 558		ip->i_qadata = NULL;
 559	}
 560	up_write(&ip->i_rw_mutex);
 561}
 562
 563int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 564{
 565	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 566	struct gfs2_quota_data **qd;
 567	int error;
 568
 569	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 570		return 0;
 571
 572	if (ip->i_qadata == NULL) {
 573		error = gfs2_rsqa_alloc(ip);
 574		if (error)
 575			return error;
 576	}
 577
 578	qd = ip->i_qadata->qa_qd;
 579
 580	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 581	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 582		return -EIO;
 583
 584	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 585	if (error)
 586		goto out;
 587	ip->i_qadata->qa_qd_num++;
 588	qd++;
 589
 590	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 591	if (error)
 592		goto out;
 593	ip->i_qadata->qa_qd_num++;
 594	qd++;
 595
 596	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 597	    !uid_eq(uid, ip->i_inode.i_uid)) {
 598		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 599		if (error)
 600			goto out;
 601		ip->i_qadata->qa_qd_num++;
 602		qd++;
 603	}
 604
 605	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 606	    !gid_eq(gid, ip->i_inode.i_gid)) {
 607		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 608		if (error)
 609			goto out;
 610		ip->i_qadata->qa_qd_num++;
 611		qd++;
 612	}
 613
 614out:
 615	if (error)
 616		gfs2_quota_unhold(ip);
 617	return error;
 618}
 619
 620void gfs2_quota_unhold(struct gfs2_inode *ip)
 621{
 622	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 623	u32 x;
 624
 625	if (ip->i_qadata == NULL)
 626		return;
 627	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 628
 629	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 630		qdsb_put(ip->i_qadata->qa_qd[x]);
 631		ip->i_qadata->qa_qd[x] = NULL;
 632	}
 633	ip->i_qadata->qa_qd_num = 0;
 634}
 635
 636static int sort_qd(const void *a, const void *b)
 637{
 638	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 639	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 640
 641	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 642		return -1;
 643	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 644		return 1;
 645	return 0;
 646}
 647
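/*
 * do_qc() applies a block-count delta to this node's quota_change tag for
 * the given ID.  The first nonzero tag takes a qd and slot reference
 * (QDF_CHANGE); a tag returning to zero drops them again.
 */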
 648static void do_qc(struct gfs2_quota_data *qd, s64 change)
 649{
 650	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 651	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 652	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 653	s64 x;
 654
 655	mutex_lock(&sdp->sd_quota_mutex);
 656	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 657
 658	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 659		qc->qc_change = 0;
 660		qc->qc_flags = 0;
 661		if (qd->qd_id.type == USRQUOTA)
 662			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 663		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 664	}
 665
 666	x = be64_to_cpu(qc->qc_change) + change;
 667	qc->qc_change = cpu_to_be64(x);
 668
 669	spin_lock(&qd_lock);
 670	qd->qd_change = x;
 671	spin_unlock(&qd_lock);
 672
 673	if (!x) {
 674		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 675		clear_bit(QDF_CHANGE, &qd->qd_flags);
 676		qc->qc_flags = 0;
 677		qc->qc_id = 0;
 678		slot_put(qd);
 679		qd_put(qd);
 680	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 681		qd_hold(qd);
 682		slot_hold(qd);
 683	}
 684
 685	if (change < 0) /* Reset quiet flag if we freed some blocks */
 686		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 687	mutex_unlock(&sdp->sd_quota_mutex);
 688}
 689
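/*
 * gfs2_write_buf_to_page() copies @bytes bytes from @buf to offset @off
 * within page @index of the quota file, mapping (and reading in) each
 * buffer it touches and adding it to the current transaction.
 */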
 690static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 691				  unsigned off, void *buf, unsigned bytes)
 692{
 693	struct inode *inode = &ip->i_inode;
 694	struct gfs2_sbd *sdp = GFS2_SB(inode);
 695	struct address_space *mapping = inode->i_mapping;
 696	struct page *page;
 697	struct buffer_head *bh;
 698	void *kaddr;
 699	u64 blk;
 700	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 701	unsigned to_write = bytes, pg_off = off;
 702	int done = 0;
 703
 704	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 705	boff = off % bsize;
 706
 707	page = find_or_create_page(mapping, index, GFP_NOFS);
 708	if (!page)
 709		return -ENOMEM;
 710	if (!page_has_buffers(page))
 711		create_empty_buffers(page, bsize, 0);
 712
 713	bh = page_buffers(page);
 714	while (!done) {
 715		/* Find the beginning block within the page */
 716		if (pg_off >= ((bnum * bsize) + bsize)) {
 717			bh = bh->b_this_page;
 718			bnum++;
 719			blk++;
 720			continue;
 721		}
 722		if (!buffer_mapped(bh)) {
 723			gfs2_block_map(inode, blk, bh, 1);
 724			if (!buffer_mapped(bh))
 725				goto unlock_out;
 726			/* If it's a newly allocated disk block, zero it */
 727			if (buffer_new(bh))
 728				zero_user(page, bnum * bsize, bh->b_size);
 729		}
 730		if (PageUptodate(page))
 731			set_buffer_uptodate(bh);
 732		if (!buffer_uptodate(bh)) {
 733			ll_rw_block(READ | REQ_META, 1, &bh);
 734			wait_on_buffer(bh);
 735			if (!buffer_uptodate(bh))
 736				goto unlock_out;
 737		}
 738		gfs2_trans_add_data(ip->i_gl, bh);
 739
 740		/* If we need to write to the next block as well */
 741		if (to_write > (bsize - boff)) {
 742			pg_off += (bsize - boff);
 743			to_write -= (bsize - boff);
 744			boff = pg_off % bsize;
 745			continue;
 746		}
 747		done = 1;
 748	}
 749
 750	/* Write to the page, now that we have setup the buffer(s) */
 751	kaddr = kmap_atomic(page);
 752	memcpy(kaddr + off, buf, bytes);
 753	flush_dcache_page(page);
 754	kunmap_atomic(kaddr);
 755	unlock_page(page);
 756	put_page(page);
 757
 758	return 0;
 759
 760unlock_out:
 761	unlock_page(page);
 762	put_page(page);
 763	return -EIO;
 764}
 765
 766static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 767				 loff_t loc)
 768{
 769	unsigned long pg_beg;
 770	unsigned pg_off, nbytes, overflow = 0;
 771	int pg_oflow = 0, error;
 772	void *ptr;
 773
 774	nbytes = sizeof(struct gfs2_quota);
 775
 776	pg_beg = loc >> PAGE_SHIFT;
 777	pg_off = loc % PAGE_SIZE;
 778
 779	/* If the quota straddles a page boundary, split the write in two */
 780	if ((pg_off + nbytes) > PAGE_SIZE) {
 781		pg_oflow = 1;
 782		overflow = (pg_off + nbytes) - PAGE_SIZE;
 783	}
 784
 785	ptr = qp;
 786	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
 787				       nbytes - overflow);
 788	/* If there's an overflow, write the remaining bytes to the next page */
 789	if (!error && pg_oflow)
 790		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
 791					       ptr + nbytes - overflow,
 792					       overflow);
 793	return error;
 794}
 795
 796/**
 797 * gfs2_adjust_quota - adjust record of current block usage
 798 * @ip: The quota inode
 799 * @loc: Offset of the entry in the quota file
 800 * @change: The amount of usage change to record
 801 * @qd: The quota data
 802 * @fdq: The updated limits to record
 803 *
 804 * This function was mostly borrowed from gfs2_block_truncate_page which was
 805 * in turn mostly borrowed from ext3
 806 *
 807 * Returns: 0 or -ve on error
 808 */
 809
 810static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 811			     s64 change, struct gfs2_quota_data *qd,
 812			     struct qc_dqblk *fdq)
 813{
 814	struct inode *inode = &ip->i_inode;
 815	struct gfs2_sbd *sdp = GFS2_SB(inode);
 816	struct gfs2_quota q;
 817	int err;
 818	u64 size;
 819
 820	if (gfs2_is_stuffed(ip)) {
 821		err = gfs2_unstuff_dinode(ip, NULL);
 822		if (err)
 823			return err;
 824	}
 825
 826	memset(&q, 0, sizeof(struct gfs2_quota));
 827	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 828	if (err < 0)
 829		return err;
 830
 831	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 832	err = -EIO;
 833	be64_add_cpu(&q.qu_value, change);
 834	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 835		q.qu_value = 0; /* Never go negative on quota usage */
 836	qd->qd_qb.qb_value = q.qu_value;
 837	if (fdq) {
 838		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 839			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 840			qd->qd_qb.qb_warn = q.qu_warn;
 841		}
 842		if (fdq->d_fieldmask & QC_SPC_HARD) {
 843			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 844			qd->qd_qb.qb_limit = q.qu_limit;
 845		}
 846		if (fdq->d_fieldmask & QC_SPACE) {
 847			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 848			qd->qd_qb.qb_value = q.qu_value;
 849		}
 850	}
 851
 852	err = gfs2_write_disk_quota(ip, &q, loc);
 853	if (!err) {
 854		size = loc + sizeof(struct gfs2_quota);
 855		if (size > inode->i_size)
 856			i_size_write(inode, size);
 857		inode->i_mtime = inode->i_atime = CURRENT_TIME;
 858		mark_inode_dirty(inode);
 859		set_bit(QDF_REFRESH, &qd->qd_flags);
 860	}
 861
 862	return err;
 863}
 864
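/*
 * do_sync() folds the locally accumulated changes of up to num_qd IDs into
 * the shared quota file in one transaction, holding each quota glock
 * exclusively, then subtracts the written amounts from the local tags via
 * do_qc().
 */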
 865static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 866{
 867	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 868	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 869	struct gfs2_alloc_parms ap = { .aflags = 0, };
 870	unsigned int data_blocks, ind_blocks;
 871	struct gfs2_holder *ghs, i_gh;
 872	unsigned int qx, x;
 873	struct gfs2_quota_data *qd;
 874	unsigned reserved;
 875	loff_t offset;
 876	unsigned int nalloc = 0, blocks;
 877	int error;
 878
 879	error = gfs2_rsqa_alloc(ip);
 880	if (error)
 881		return error;
 882
 883	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 884			      &data_blocks, &ind_blocks);
 885
 886	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 887	if (!ghs)
 888		return -ENOMEM;
 889
 890	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 891	inode_lock(&ip->i_inode);
 892	for (qx = 0; qx < num_qd; qx++) {
 893		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 894					   GL_NOCACHE, &ghs[qx]);
 895		if (error)
 896			goto out;
 897	}
 898
 899	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 900	if (error)
 901		goto out;
 902
 903	for (x = 0; x < num_qd; x++) {
 904		offset = qd2offset(qda[x]);
 905		if (gfs2_write_alloc_required(ip, offset,
 906					      sizeof(struct gfs2_quota)))
 907			nalloc++;
 908	}
 909
 910	/* 
 911	 * 1 blk for unstuffing inode if stuffed. We add this extra
 912	 * block to the reservation unconditionally. If the inode
 913	 * doesn't need unstuffing, the block will be released to the 
 914	 * rgrp since it won't be allocated during the transaction
 915	 */
 916	/* +3 in the end for unstuffing block, inode size update block
 917	 * and another block in case quota straddles page boundary and 
 918	 * two blocks need to be updated instead of 1 */
 919	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 920
 921	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 922	ap.target = reserved;
 923	error = gfs2_inplace_reserve(ip, &ap);
 924	if (error)
 925		goto out_alloc;
 926
 927	if (nalloc)
 928		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 929
 930	error = gfs2_trans_begin(sdp, blocks, 0);
 931	if (error)
 932		goto out_ipres;
 933
 934	for (x = 0; x < num_qd; x++) {
 935		qd = qda[x];
 936		offset = qd2offset(qd);
 937		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 938		if (error)
 939			goto out_end_trans;
 940
 941		do_qc(qd, -qd->qd_change_sync);
 942		set_bit(QDF_REFRESH, &qd->qd_flags);
 943	}
 944
 945	error = 0;
 946
 947out_end_trans:
 948	gfs2_trans_end(sdp);
 949out_ipres:
 950	gfs2_inplace_release(ip);
 951out_alloc:
 952	gfs2_glock_dq_uninit(&i_gh);
 953out:
 954	while (qx--)
 955		gfs2_glock_dq_uninit(&ghs[qx]);
 956	inode_unlock(&ip->i_inode);
 957	kfree(ghs);
 958	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
 959	return error;
 960}
 961
 962static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 963{
 964	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 965	struct gfs2_quota q;
 966	struct gfs2_quota_lvb *qlvb;
 967	loff_t pos;
 968	int error;
 969
 970	memset(&q, 0, sizeof(struct gfs2_quota));
 971	pos = qd2offset(qd);
 972	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 973	if (error < 0)
 974		return error;
 975
 976	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 977	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 978	qlvb->__pad = 0;
 979	qlvb->qb_limit = q.qu_limit;
 980	qlvb->qb_warn = q.qu_warn;
 981	qlvb->qb_value = q.qu_value;
 982	qd->qd_qb = *qlvb;
 983
 984	return 0;
 985}
 986
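/*
 * do_glock() acquires the quota glock in shared mode; if the cached LVB
 * copy is stale or a refresh is forced, it retries in exclusive mode and
 * re-reads the entry from the quota file into the LVB via update_qd().
 */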
 987static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 988		    struct gfs2_holder *q_gh)
 989{
 990	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 991	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 992	struct gfs2_holder i_gh;
 993	int error;
 994
 995restart:
 996	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 997	if (error)
 998		return error;
 999
1000	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1001		force_refresh = FORCE;
1002
1003	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1004
1005	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1006		gfs2_glock_dq_uninit(q_gh);
1007		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1008					   GL_NOCACHE, q_gh);
1009		if (error)
1010			return error;
1011
1012		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1013		if (error)
1014			goto fail;
1015
1016		error = update_qd(sdp, qd);
1017		if (error)
1018			goto fail_gunlock;
1019
1020		gfs2_glock_dq_uninit(&i_gh);
1021		gfs2_glock_dq_uninit(q_gh);
1022		force_refresh = 0;
1023		goto restart;
1024	}
1025
1026	return 0;
1027
1028fail_gunlock:
1029	gfs2_glock_dq_uninit(&i_gh);
1030fail:
1031	gfs2_glock_dq_uninit(q_gh);
1032	return error;
1033}
1034
1035int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1036{
1037	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1038	struct gfs2_quota_data *qd;
1039	u32 x;
1040	int error = 0;
1041
1042	if (capable(CAP_SYS_RESOURCE) ||
1043	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1044		return 0;
1045
1046	error = gfs2_quota_hold(ip, uid, gid);
1047	if (error)
1048		return error;
1049
1050	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1051	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1052
1053	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1054		qd = ip->i_qadata->qa_qd[x];
1055		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1056		if (error)
1057			break;
1058	}
1059
1060	if (!error)
1061		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1062	else {
1063		while (x--)
1064			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1065		gfs2_quota_unhold(ip);
1066	}
1067
1068	return error;
1069}
1070
1071static int need_sync(struct gfs2_quota_data *qd)
1072{
1073	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1074	struct gfs2_tune *gt = &sdp->sd_tune;
1075	s64 value;
1076	unsigned int num, den;
1077	int do_sync = 1;
1078
1079	if (!qd->qd_qb.qb_limit)
1080		return 0;
1081
1082	spin_lock(&qd_lock);
1083	value = qd->qd_change;
1084	spin_unlock(&qd_lock);
1085
1086	spin_lock(&gt->gt_spin);
1087	num = gt->gt_quota_scale_num;
1088	den = gt->gt_quota_scale_den;
1089	spin_unlock(&gt->gt_spin);
1090
1091	if (value < 0)
1092		do_sync = 0;
1093	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1094		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1095		do_sync = 0;
1096	else {
1097		value *= gfs2_jindex_size(sdp) * num;
1098		value = div_s64(value, den);
1099		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1100		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1101			do_sync = 0;
1102	}
1103
1104	return do_sync;
1105}
1106
1107void gfs2_quota_unlock(struct gfs2_inode *ip)
1108{
1109	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1110	struct gfs2_quota_data *qda[4];
1111	unsigned int count = 0;
1112	u32 x;
1113	int found;
1114
1115	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1116		goto out;
1117
1118	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1119		struct gfs2_quota_data *qd;
1120		int sync;
1121
1122		qd = ip->i_qadata->qa_qd[x];
1123		sync = need_sync(qd);
1124
1125		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1126		if (!sync)
1127			continue;
1128
1129		spin_lock(&qd_lock);
1130		found = qd_check_sync(sdp, qd, NULL);
1131		spin_unlock(&qd_lock);
1132
1133		if (!found)
1134			continue;
1135
1136		gfs2_assert_warn(sdp, qd->qd_change_sync);
1137		if (bh_get(qd)) {
1138			clear_bit(QDF_LOCKED, &qd->qd_flags);
1139			slot_put(qd);
1140			qd_put(qd);
1141			continue;
1142		}
1143
1144		qda[count++] = qd;
1145	}
1146
1147	if (count) {
1148		do_sync(count, qda);
1149		for (x = 0; x < count; x++)
1150			qd_unlock(qda[x]);
1151	}
1152
1153out:
1154	gfs2_quota_unhold(ip);
1155}
1156
1157#define MAX_LINE 256
1158
1159static int print_message(struct gfs2_quota_data *qd, char *type)
1160{
1161	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1162
1163	fs_info(sdp, "quota %s for %s %u\n",
1164		type,
1165		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1166		from_kqid(&init_user_ns, qd->qd_id));
1167
1168	return 0;
1169}
1170
1171/**
1172 * gfs2_quota_check - check if allocating new blocks will exceed quota
1173 * @ip:  The inode for which this check is being performed
1174 * @uid: The uid to check against
1175 * @gid: The gid to check against
1176 * @ap:  The allocation parameters. ap->target contains the requested
1177 *       blocks. ap->min_target, if set, contains the minimum blks
1178 *       requested.
1179 *
1180 * Returns: 0 on success.
1181 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 1182 *                  quota must allow at least min_req blks for success and
1183 *                  ap->allowed is set to the number of blocks allowed
1184 *
1185 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1186 *                  of blocks available.
1187 */
1188int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1189		     struct gfs2_alloc_parms *ap)
1190{
1191	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1192	struct gfs2_quota_data *qd;
1193	s64 value, warn, limit;
1194	u32 x;
1195	int error = 0;
1196
1197	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1198	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1199		return 0;
1200
1201        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1202                return 0;
1203
1204	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1205		qd = ip->i_qadata->qa_qd[x];
1206
1207		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1208		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1209			continue;
1210
1211		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1212		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1213		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1214		spin_lock(&qd_lock);
1215		value += qd->qd_change;
1216		spin_unlock(&qd_lock);
1217
1218		if (limit > 0 && (limit - value) < ap->allowed)
1219			ap->allowed = limit - value;
1220		/* If we can't meet the target */
1221		if (limit && limit < (value + (s64)ap->target)) {
1222			/* If no min_target specified or we don't meet
1223			 * min_target, return -EDQUOT */
1224			if (!ap->min_target || ap->min_target > ap->allowed) {
1225				if (!test_and_set_bit(QDF_QMSG_QUIET,
1226						      &qd->qd_flags)) {
1227					print_message(qd, "exceeded");
1228					quota_send_warning(qd->qd_id,
1229							   sdp->sd_vfs->s_dev,
1230							   QUOTA_NL_BHARDWARN);
1231				}
1232				error = -EDQUOT;
1233				break;
1234			}
1235		} else if (warn && warn < value &&
1236			   time_after_eq(jiffies, qd->qd_last_warn +
1237					 gfs2_tune_get(sdp, gt_quota_warn_period)
1238					 * HZ)) {
1239			quota_send_warning(qd->qd_id,
1240					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1241			error = print_message(qd, "warning");
1242			qd->qd_last_warn = jiffies;
1243		}
1244	}
1245	return error;
1246}
1247
1248void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1249		       kuid_t uid, kgid_t gid)
1250{
1251	struct gfs2_quota_data *qd;
1252	u32 x;
1253	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1254
1255	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
1256	    gfs2_assert_warn(sdp, change))
1257		return;
1258	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1259		return;
1260
1261	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1262		qd = ip->i_qadata->qa_qd[x];
1263
1264		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1265		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1266			do_qc(qd, change);
1267		}
1268	}
1269}
1270
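/*
 * Entry point for periodic and explicit syncs: bump the sync generation,
 * then repeatedly "fish" batches of dirty quota_data off sd_quota_list
 * and write each batch out with do_sync().
 */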
1271int gfs2_quota_sync(struct super_block *sb, int type)
1272{
1273	struct gfs2_sbd *sdp = sb->s_fs_info;
1274	struct gfs2_quota_data **qda;
1275	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
1276	unsigned int num_qd;
1277	unsigned int x;
1278	int error = 0;
1279
1280	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1281	if (!qda)
1282		return -ENOMEM;
1283
1284	mutex_lock(&sdp->sd_quota_sync_mutex);
1285	sdp->sd_quota_sync_gen++;
1286
1287	do {
1288		num_qd = 0;
1289
1290		for (;;) {
1291			error = qd_fish(sdp, qda + num_qd);
1292			if (error || !qda[num_qd])
1293				break;
1294			if (++num_qd == max_qd)
1295				break;
1296		}
1297
1298		if (num_qd) {
1299			if (!error)
1300				error = do_sync(num_qd, qda);
1301			if (!error)
1302				for (x = 0; x < num_qd; x++)
1303					qda[x]->qd_sync_gen =
1304						sdp->sd_quota_sync_gen;
1305
1306			for (x = 0; x < num_qd; x++)
1307				qd_unlock(qda[x]);
1308		}
1309	} while (!error && num_qd == max_qd);
1310
1311	mutex_unlock(&sdp->sd_quota_sync_mutex);
1312	kfree(qda);
1313
1314	return error;
1315}
1316
1317int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1318{
1319	struct gfs2_quota_data *qd;
1320	struct gfs2_holder q_gh;
1321	int error;
1322
1323	error = qd_get(sdp, qid, &qd);
1324	if (error)
1325		return error;
1326
1327	error = do_glock(qd, FORCE, &q_gh);
1328	if (!error)
1329		gfs2_glock_dq_uninit(&q_gh);
1330
1331	qd_put(qd);
1332	return error;
1333}
1334
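/*
 * gfs2_quota_init() runs at mount time: it scans this node's quota_change
 * file and recreates an in-memory quota_data (QDF_CHANGE set, slot marked
 * in the bitmap) for every nonzero tag, so changes that were never synced
 * before the last unmount or crash are carried forward.
 */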
1335int gfs2_quota_init(struct gfs2_sbd *sdp)
1336{
1337	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1338	u64 size = i_size_read(sdp->sd_qc_inode);
1339	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1340	unsigned int x, slot = 0;
1341	unsigned int found = 0;
1342	unsigned int hash;
1343	unsigned int bm_size;
1344	u64 dblock;
1345	u32 extlen = 0;
1346	int error;
1347
1348	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1349		return -EIO;
1350
1351	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1352	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1353	bm_size *= sizeof(unsigned long);
1354	error = -ENOMEM;
1355	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1356	if (sdp->sd_quota_bitmap == NULL)
1357		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1358						 __GFP_ZERO, PAGE_KERNEL);
1359	if (!sdp->sd_quota_bitmap)
1360		return error;
1361
1362	for (x = 0; x < blocks; x++) {
1363		struct buffer_head *bh;
1364		const struct gfs2_quota_change *qc;
1365		unsigned int y;
1366
1367		if (!extlen) {
1368			int new = 0;
1369			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1370			if (error)
1371				goto fail;
1372		}
1373		error = -EIO;
1374		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1375		if (!bh)
1376			goto fail;
1377		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1378			brelse(bh);
1379			goto fail;
1380		}
1381
1382		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1383		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1384		     y++, slot++) {
1385			struct gfs2_quota_data *qd;
1386			s64 qc_change = be64_to_cpu(qc->qc_change);
1387			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1388			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1389						USRQUOTA : GRPQUOTA;
1390			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1391						      be32_to_cpu(qc->qc_id));
1392			qc++;
1393			if (!qc_change)
1394				continue;
1395
1396			hash = gfs2_qd_hash(sdp, qc_id);
1397			qd = qd_alloc(hash, sdp, qc_id);
1398			if (qd == NULL) {
1399				brelse(bh);
1400				goto fail;
1401			}
1402
1403			set_bit(QDF_CHANGE, &qd->qd_flags);
1404			qd->qd_change = qc_change;
1405			qd->qd_slot = slot;
1406			qd->qd_slot_count = 1;
1407
1408			spin_lock(&qd_lock);
1409			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1410			list_add(&qd->qd_list, &sdp->sd_quota_list);
1411			atomic_inc(&sdp->sd_quota_count);
1412			spin_unlock(&qd_lock);
1413
1414			spin_lock_bucket(hash);
1415			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1416			spin_unlock_bucket(hash);
1417
1418			found++;
1419		}
1420
1421		brelse(bh);
1422		dblock++;
1423		extlen--;
1424	}
1425
1426	if (found)
1427		fs_info(sdp, "found %u quota changes\n", found);
1428
1429	return 0;
1430
1431fail:
1432	gfs2_quota_cleanup(sdp);
1433	return error;
1434}
1435
1436void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1437{
1438	struct list_head *head = &sdp->sd_quota_list;
1439	struct gfs2_quota_data *qd;
1440
1441	spin_lock(&qd_lock);
1442	while (!list_empty(head)) {
1443		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1444
1445		list_del(&qd->qd_list);
1446
1447		/* Also remove if this qd exists in the reclaim list */
1448		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1449		atomic_dec(&sdp->sd_quota_count);
1450		spin_unlock(&qd_lock);
1451
1452		spin_lock_bucket(qd->qd_hash);
1453		hlist_bl_del_rcu(&qd->qd_hlist);
1454		spin_unlock_bucket(qd->qd_hash);
1455
1456		gfs2_assert_warn(sdp, !qd->qd_change);
1457		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1458		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1459
1460		gfs2_glock_put(qd->qd_gl);
1461		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1462
1463		spin_lock(&qd_lock);
1464	}
1465	spin_unlock(&qd_lock);
1466
1467	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1468
1469	kvfree(sdp->sd_quota_bitmap);
1470	sdp->sd_quota_bitmap = NULL;
1471}
1472
1473static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1474{
1475	if (error == 0 || error == -EROFS)
1476		return;
1477	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1478		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1479}
1480
1481static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1482			       int (*fxn)(struct super_block *sb, int type),
1483			       unsigned long t, unsigned long *timeo,
1484			       unsigned int *new_timeo)
1485{
1486	if (t >= *timeo) {
1487		int error = fxn(sdp->sd_vfs, 0);
1488		quotad_error(sdp, msg, error);
1489		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1490	} else {
1491		*timeo -= t;
1492	}
1493}
1494
1495static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1496{
1497	struct gfs2_inode *ip;
1498
1499	while(1) {
1500		ip = NULL;
1501		spin_lock(&sdp->sd_trunc_lock);
1502		if (!list_empty(&sdp->sd_trunc_list)) {
1503			ip = list_entry(sdp->sd_trunc_list.next,
1504					struct gfs2_inode, i_trunc_list);
1505			list_del_init(&ip->i_trunc_list);
1506		}
1507		spin_unlock(&sdp->sd_trunc_lock);
1508		if (ip == NULL)
1509			return;
1510		gfs2_glock_finish_truncate(ip);
1511	}
1512}
1513
1514void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1515	if (!sdp->sd_statfs_force_sync) {
1516		sdp->sd_statfs_force_sync = 1;
1517		wake_up(&sdp->sd_quota_wait);
1518	}
1519}
1520
1521
1522/**
1523 * gfs2_quotad - Write cached quota changes into the quota file
1524 * @sdp: Pointer to GFS2 superblock
1525 *
1526 */
1527
1528int gfs2_quotad(void *data)
1529{
1530	struct gfs2_sbd *sdp = data;
1531	struct gfs2_tune *tune = &sdp->sd_tune;
1532	unsigned long statfs_timeo = 0;
1533	unsigned long quotad_timeo = 0;
1534	unsigned long t = 0;
1535	DEFINE_WAIT(wait);
1536	int empty;
1537
1538	while (!kthread_should_stop()) {
1539
1540		/* Update the master statfs file */
1541		if (sdp->sd_statfs_force_sync) {
1542			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1543			quotad_error(sdp, "statfs", error);
1544			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1545		}
1546		else
1547			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1548				   	   &statfs_timeo,
1549					   &tune->gt_statfs_quantum);
1550
1551		/* Update quota file */
1552		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1553				   &quotad_timeo, &tune->gt_quota_quantum);
1554
1555		/* Check for & recover partially truncated inodes */
1556		quotad_check_trunc_list(sdp);
1557
1558		try_to_freeze();
1559
1560		t = min(quotad_timeo, statfs_timeo);
1561
1562		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1563		spin_lock(&sdp->sd_trunc_lock);
1564		empty = list_empty(&sdp->sd_trunc_list);
1565		spin_unlock(&sdp->sd_trunc_lock);
1566		if (empty && !sdp->sd_statfs_force_sync)
1567			t -= schedule_timeout(t);
1568		else
1569			t = 0;
1570		finish_wait(&sdp->sd_quota_wait, &wait);
1571	}
1572
1573	return 0;
1574}
1575
1576static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1577{
1578	struct gfs2_sbd *sdp = sb->s_fs_info;
1579
1580	memset(state, 0, sizeof(*state));
1581
1582	switch (sdp->sd_args.ar_quota) {
1583	case GFS2_QUOTA_ON:
1584		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1585		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1586		/*FALLTHRU*/
1587	case GFS2_QUOTA_ACCOUNT:
1588		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1589						  QCI_SYSFILE;
1590		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1591						  QCI_SYSFILE;
1592		break;
1593	case GFS2_QUOTA_OFF:
1594		break;
1595	}
1596	if (sdp->sd_quota_inode) {
1597		state->s_state[USRQUOTA].ino =
1598					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1599		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1600	}
1601	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1602	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1603	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1604	return 0;
1605}
1606
1607static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1608			  struct qc_dqblk *fdq)
1609{
1610	struct gfs2_sbd *sdp = sb->s_fs_info;
1611	struct gfs2_quota_lvb *qlvb;
1612	struct gfs2_quota_data *qd;
1613	struct gfs2_holder q_gh;
1614	int error;
1615
1616	memset(fdq, 0, sizeof(*fdq));
1617
1618	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1619		return -ESRCH; /* Crazy XFS error code */
1620
1621	if ((qid.type != USRQUOTA) &&
1622	    (qid.type != GRPQUOTA))
1623		return -EINVAL;
1624
1625	error = qd_get(sdp, qid, &qd);
1626	if (error)
1627		return error;
1628	error = do_glock(qd, FORCE, &q_gh);
1629	if (error)
1630		goto out;
1631
1632	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1633	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1634	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1635	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1636
1637	gfs2_glock_dq_uninit(&q_gh);
1638out:
1639	qd_put(qd);
1640	return error;
1641}
1642
1643/* GFS2 only supports a subset of the XFS fields */
1644#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1645
1646static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1647			  struct qc_dqblk *fdq)
1648{
1649	struct gfs2_sbd *sdp = sb->s_fs_info;
1650	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1651	struct gfs2_quota_data *qd;
1652	struct gfs2_holder q_gh, i_gh;
1653	unsigned int data_blocks, ind_blocks;
1654	unsigned int blocks = 0;
1655	int alloc_required;
1656	loff_t offset;
1657	int error;
1658
1659	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1660		return -ESRCH; /* Crazy XFS error code */
1661
1662	if ((qid.type != USRQUOTA) &&
1663	    (qid.type != GRPQUOTA))
1664		return -EINVAL;
1665
1666	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1667		return -EINVAL;
1668
1669	error = qd_get(sdp, qid, &qd);
1670	if (error)
1671		return error;
1672
1673	error = gfs2_rsqa_alloc(ip);
1674	if (error)
1675		goto out_put;
1676
1677	inode_lock(&ip->i_inode);
1678	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1679	if (error)
1680		goto out_unlockput;
1681	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1682	if (error)
1683		goto out_q;
1684
1685	/* Check for existing entry, if none then alloc new blocks */
1686	error = update_qd(sdp, qd);
1687	if (error)
1688		goto out_i;
1689
1690	/* If nothing has changed, this is a no-op */
1691	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1692	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1693		fdq->d_fieldmask ^= QC_SPC_SOFT;
1694
1695	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1696	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1697		fdq->d_fieldmask ^= QC_SPC_HARD;
1698
1699	if ((fdq->d_fieldmask & QC_SPACE) &&
1700	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1701		fdq->d_fieldmask ^= QC_SPACE;
1702
1703	if (fdq->d_fieldmask == 0)
1704		goto out_i;
1705
1706	offset = qd2offset(qd);
1707	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1708	if (gfs2_is_stuffed(ip))
1709		alloc_required = 1;
1710	if (alloc_required) {
1711		struct gfs2_alloc_parms ap = { .aflags = 0, };
1712		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1713				       &data_blocks, &ind_blocks);
1714		blocks = 1 + data_blocks + ind_blocks;
1715		ap.target = blocks;
1716		error = gfs2_inplace_reserve(ip, &ap);
1717		if (error)
1718			goto out_i;
1719		blocks += gfs2_rg_blocks(ip, blocks);
1720	}
1721
1722	/* Some quotas span block boundaries and can update two blocks,
1723	   adding an extra block to the transaction to handle such quotas */
1724	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1725	if (error)
1726		goto out_release;
1727
1728	/* Apply changes */
1729	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1730	if (!error)
1731		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1732
1733	gfs2_trans_end(sdp);
1734out_release:
1735	if (alloc_required)
1736		gfs2_inplace_release(ip);
1737out_i:
1738	gfs2_glock_dq_uninit(&i_gh);
1739out_q:
1740	gfs2_glock_dq_uninit(&q_gh);
1741out_unlockput:
1742	inode_unlock(&ip->i_inode);
1743out_put:
1744	qd_put(qd);
1745	return error;
1746}
1747
1748const struct quotactl_ops gfs2_quotactl_ops = {
1749	.quota_sync     = gfs2_quota_sync,
1750	.get_state	= gfs2_quota_get_state,
1751	.get_dqblk	= gfs2_get_dqblk,
1752	.set_dqblk	= gfs2_set_dqblk,
1753};
1754
1755void __init gfs2_quota_hash_init(void)
1756{
1757	unsigned i;
1758
1759	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1760		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1761}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
  14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
  18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
  21 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
  26 * (with infinitely many nodes with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
  85static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  86				 const struct kqid qid)
  87{
  88	unsigned int h;
  89
  90	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  91	h = jhash(&qid, sizeof(struct kqid), h);
  92
  93	return h & GFS2_QD_HASH_MASK;
  94}
  95
  96static inline void spin_lock_bucket(unsigned int hash)
  97{
  98        hlist_bl_lock(&qd_hash_table[hash]);
  99}
 100
 101static inline void spin_unlock_bucket(unsigned int hash)
 102{
 103        hlist_bl_unlock(&qd_hash_table[hash]);
 104}
 105
 106static void gfs2_qd_dealloc(struct rcu_head *rcu)
 107{
 108	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 109	struct gfs2_sbd *sdp = qd->qd_sbd;
 110
 111	kmem_cache_free(gfs2_quotad_cachep, qd);
 112	if (atomic_dec_and_test(&sdp->sd_quota_count))
 113		wake_up(&sdp->sd_kill_wait);
 114}
 115
 116static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
 117{
 118	struct gfs2_sbd *sdp = qd->qd_sbd;
 119
 120	spin_lock(&qd_lock);
 121	list_del(&qd->qd_list);
 122	spin_unlock(&qd_lock);
 123
 124	spin_lock_bucket(qd->qd_hash);
 125	hlist_bl_del_rcu(&qd->qd_hlist);
 126	spin_unlock_bucket(qd->qd_hash);
 127
 128	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
 129		gfs2_assert_warn(sdp, !qd->qd_change);
 130		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
 131		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 132	}
 133
 134	gfs2_glock_put(qd->qd_gl);
 135	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 136}
 137
 138static void gfs2_qd_list_dispose(struct list_head *list)
 139{
 140	struct gfs2_quota_data *qd;
 141
 142	while (!list_empty(list)) {
 143		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 144		list_del(&qd->qd_lru);
 145
 146		gfs2_qd_dispose(qd);
 147	}
 148}
 149
 150
 151static enum lru_status gfs2_qd_isolate(struct list_head *item,
 152		struct list_lru_one *lru, void *arg)
 153{
 154	struct list_head *dispose = arg;
 155	struct gfs2_quota_data *qd =
 156		list_entry(item, struct gfs2_quota_data, qd_lru);
 157	enum lru_status status;
 158
 159	if (!spin_trylock(&qd->qd_lockref.lock))
 160		return LRU_SKIP;
 161
 162	status = LRU_SKIP;
 163	if (qd->qd_lockref.count == 0) {
 164		lockref_mark_dead(&qd->qd_lockref);
 165		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 166		status = LRU_REMOVED;
 167	}
 168
 169	spin_unlock(&qd->qd_lockref.lock);
 170	return status;
 171}
 172
 173static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 174					 struct shrink_control *sc)
 175{
 176	LIST_HEAD(dispose);
 177	unsigned long freed;
 178
 179	if (!(sc->gfp_mask & __GFP_FS))
 180		return SHRINK_STOP;
 181
 182	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 183				     gfs2_qd_isolate, &dispose);
 184
 185	gfs2_qd_list_dispose(&dispose);
 186
 187	return freed;
 188}
 189
 190static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 191					  struct shrink_control *sc)
 192{
 193	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 194}
 195
 196static struct shrinker *gfs2_qd_shrinker;
 197
 198int __init gfs2_qd_shrinker_init(void)
 199{
 200	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
 201	if (!gfs2_qd_shrinker)
 202		return -ENOMEM;
 203
 204	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
 205	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
 206
 207	shrinker_register(gfs2_qd_shrinker);
 208
 209	return 0;
 210}
 211
 212void gfs2_qd_shrinker_exit(void)
 213{
 214	shrinker_free(gfs2_qd_shrinker);
 215}
 216
 217static u64 qd2index(struct gfs2_quota_data *qd)
 218{
 219	struct kqid qid = qd->qd_id;
 220	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 221		((qid.type == USRQUOTA) ? 0 : 1);
 222}
 223
 224static u64 qd2offset(struct gfs2_quota_data *qd)
 225{
 226	return qd2index(qd) * sizeof(struct gfs2_quota);
 227}
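/*
 * Illustrative sketch of the quota file layout implied by qd2index() and
 * qd2offset() above (example IDs only, not from the original source).
 * User and group IDs are interleaved:
 *
 *	UID 0   -> index 0   -> offset 0   * sizeof(struct gfs2_quota)
 *	GID 0   -> index 1   -> offset 1   * sizeof(struct gfs2_quota)
 *	UID 100 -> index 200 -> offset 200 * sizeof(struct gfs2_quota)
 *	GID 100 -> index 201 -> offset 201 * sizeof(struct gfs2_quota)
 */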
 228
 229static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 230{
 231	struct gfs2_quota_data *qd;
 232	int error;
 233
 234	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 235	if (!qd)
 236		return NULL;
 237
 238	qd->qd_sbd = sdp;
 239	qd->qd_lockref.count = 0;
 240	spin_lock_init(&qd->qd_lockref.lock);
 241	qd->qd_id = qid;
 242	qd->qd_slot = -1;
 243	INIT_LIST_HEAD(&qd->qd_lru);
 244	qd->qd_hash = hash;
 245
 246	error = gfs2_glock_get(sdp, qd2index(qd),
 247			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 248	if (error)
 249		goto fail;
 250
 251	return qd;
 252
 253fail:
 254	kmem_cache_free(gfs2_quotad_cachep, qd);
 255	return NULL;
 256}
 257
 258static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 259						     const struct gfs2_sbd *sdp,
 260						     struct kqid qid)
 261{
 262	struct gfs2_quota_data *qd;
 263	struct hlist_bl_node *h;
 264
 265	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 266		if (!qid_eq(qd->qd_id, qid))
 267			continue;
 268		if (qd->qd_sbd != sdp)
 269			continue;
 270		if (lockref_get_not_dead(&qd->qd_lockref)) {
 271			list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
 272			return qd;
 273		}
 274	}
 275
 276	return NULL;
 277}
 278
 279
 280static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 281		  struct gfs2_quota_data **qdp)
 282{
 283	struct gfs2_quota_data *qd, *new_qd;
 284	unsigned int hash = gfs2_qd_hash(sdp, qid);
 285
 286	rcu_read_lock();
 287	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 288	rcu_read_unlock();
 289
 290	if (qd)
 291		return 0;
 292
 293	new_qd = qd_alloc(hash, sdp, qid);
 294	if (!new_qd)
 295		return -ENOMEM;
 296
 297	spin_lock(&qd_lock);
 298	spin_lock_bucket(hash);
 299	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 300	if (qd == NULL) {
 301		new_qd->qd_lockref.count++;
 302		*qdp = new_qd;
 303		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 304		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 305		atomic_inc(&sdp->sd_quota_count);
 306	}
 307	spin_unlock_bucket(hash);
 308	spin_unlock(&qd_lock);
 309
 310	if (qd) {
 311		gfs2_glock_put(new_qd->qd_gl);
 312		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 313	}
 314
 315	return 0;
 316}
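/*
 * Illustrative usage sketch for qd_get() above (hypothetical caller, not
 * part of the original source).  qd_get() either finds an existing
 * gfs2_quota_data in the hash table or inserts the one it just allocated;
 * a racing inserter that loses simply frees its spare copy.  The reference
 * taken here is dropped with qd_put():
 *
 *	struct gfs2_quota_data *qd;
 *	int error = qd_get(sdp, make_kqid_uid(inode->i_uid), &qd);
 *	if (error)
 *		return error;
 *	...
 *	qd_put(qd);
 */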
 317
 318
 319static void __qd_hold(struct gfs2_quota_data *qd)
 320{
 321	struct gfs2_sbd *sdp = qd->qd_sbd;
 322	gfs2_assert(sdp, qd->qd_lockref.count > 0);
 323	qd->qd_lockref.count++;
 324}
 325
 326static void qd_put(struct gfs2_quota_data *qd)
 327{
 328	struct gfs2_sbd *sdp;
 329
 330	if (lockref_put_or_lock(&qd->qd_lockref))
 331		return;
 332
 333	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
 334	sdp = qd->qd_sbd;
 335	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
 336		lockref_mark_dead(&qd->qd_lockref);
 337		spin_unlock(&qd->qd_lockref.lock);
 338
 339		gfs2_qd_dispose(qd);
 340		return;
 341	}
 342
 343	qd->qd_lockref.count = 0;
 344	list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
 345	spin_unlock(&qd->qd_lockref.lock);
 346}
 347
 348static int slot_get(struct gfs2_quota_data *qd)
 349{
 350	struct gfs2_sbd *sdp = qd->qd_sbd;
 351	unsigned int bit;
 352	int error = 0;
 353
 354	spin_lock(&sdp->sd_bitmap_lock);
 355	if (qd->qd_slot_ref == 0) {
 356		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
 357					  sdp->sd_quota_slots);
 358		if (bit >= sdp->sd_quota_slots) {
 359			error = -ENOSPC;
 360			goto out;
 361		}
 362		set_bit(bit, sdp->sd_quota_bitmap);
 363		qd->qd_slot = bit;
 364	}
 365	qd->qd_slot_ref++;
 366out:
 367	spin_unlock(&sdp->sd_bitmap_lock);
 368	return error;
 369}
 370
 371static void slot_hold(struct gfs2_quota_data *qd)
 372{
 373	struct gfs2_sbd *sdp = qd->qd_sbd;
 374
 375	spin_lock(&sdp->sd_bitmap_lock);
 376	gfs2_assert(sdp, qd->qd_slot_ref);
 377	qd->qd_slot_ref++;
 378	spin_unlock(&sdp->sd_bitmap_lock);
 379}
 380
 381static void slot_put(struct gfs2_quota_data *qd)
 382{
 383	struct gfs2_sbd *sdp = qd->qd_sbd;
 384
 385	spin_lock(&sdp->sd_bitmap_lock);
 386	gfs2_assert(sdp, qd->qd_slot_ref);
 387	if (!--qd->qd_slot_ref) {
 388		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 389		qd->qd_slot = -1;
 390	}
 391	spin_unlock(&sdp->sd_bitmap_lock);
 392}
 393
 394static int bh_get(struct gfs2_quota_data *qd)
 395{
 396	struct gfs2_sbd *sdp = qd->qd_sbd;
 397	struct inode *inode = sdp->sd_qc_inode;
 398	struct gfs2_inode *ip = GFS2_I(inode);
 399	unsigned int block, offset;
 400	struct buffer_head *bh = NULL;
 401	struct iomap iomap = { };
 402	int error;
 403
 404	spin_lock(&qd->qd_lockref.lock);
 405	if (qd->qd_bh_count) {
 406		qd->qd_bh_count++;
 407		spin_unlock(&qd->qd_lockref.lock);
 408		return 0;
 409	}
 410	spin_unlock(&qd->qd_lockref.lock);
 411
 412	block = qd->qd_slot / sdp->sd_qc_per_block;
 413	offset = qd->qd_slot % sdp->sd_qc_per_block;
 414
 415	error = gfs2_iomap_get(inode,
 416			       (loff_t)block << inode->i_blkbits,
 417			       i_blocksize(inode), &iomap);
 418	if (error)
 419		return error;
 420	error = -ENOENT;
 421	if (iomap.type != IOMAP_MAPPED)
 422		return error;
 423
 424	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 425			       DIO_WAIT, 0, &bh);
 426	if (error)
 427		return error;
 428	error = -EIO;
 429	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 430		goto out;
 431
 432	spin_lock(&qd->qd_lockref.lock);
 433	if (qd->qd_bh == NULL) {
 434		qd->qd_bh = bh;
 435		qd->qd_bh_qc = (struct gfs2_quota_change *)
 436			(bh->b_data + sizeof(struct gfs2_meta_header) +
 437			 offset * sizeof(struct gfs2_quota_change));
 438		bh = NULL;
 439	}
 440	qd->qd_bh_count++;
 441	spin_unlock(&qd->qd_lockref.lock);
 442	error = 0;
 443
 444out:
 445	brelse(bh);
 446	return error;
 447}
 448
 449static void bh_put(struct gfs2_quota_data *qd)
 450{
 451	struct gfs2_sbd *sdp = qd->qd_sbd;
 452	struct buffer_head *bh = NULL;
 453
 454	spin_lock(&qd->qd_lockref.lock);
 455	gfs2_assert(sdp, qd->qd_bh_count);
 456	if (!--qd->qd_bh_count) {
 457		bh = qd->qd_bh;
 458		qd->qd_bh = NULL;
 459		qd->qd_bh_qc = NULL;
 460	}
 461	spin_unlock(&qd->qd_lockref.lock);
 462	brelse(bh);
 463}
 464
 465static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 466			 u64 sync_gen)
 467{
 468	bool ret = false;
 469
 470	spin_lock(&qd->qd_lockref.lock);
 471	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 472	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 473	    qd->qd_sync_gen >= sync_gen)
 474		goto out;
 475
 476	if (__lockref_is_dead(&qd->qd_lockref))
 477		goto out;
 478	qd->qd_lockref.count++;
 479
 480	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 481	set_bit(QDF_LOCKED, &qd->qd_flags);
 482	qd->qd_change_sync = qd->qd_change;
 483	slot_hold(qd);
 484	ret = true;
 485
 486out:
 487	spin_unlock(&qd->qd_lockref.lock);
 488	return ret;
 489}
 490
 491static void qd_ungrab_sync(struct gfs2_quota_data *qd)
 492{
 493	clear_bit(QDF_LOCKED, &qd->qd_flags);
 494	slot_put(qd);
 495	qd_put(qd);
 496}
 497
 498static void qdsb_put(struct gfs2_quota_data *qd)
 499{
 500	bh_put(qd);
 501	slot_put(qd);
 502	qd_put(qd);
 503}
 504
 505static void qd_unlock(struct gfs2_quota_data *qd)
 506{
 507	spin_lock(&qd->qd_lockref.lock);
 508	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
 509	clear_bit(QDF_LOCKED, &qd->qd_flags);
 510	spin_unlock(&qd->qd_lockref.lock);
 511	qdsb_put(qd);
 512}
 513
 514static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 515		    struct gfs2_quota_data **qdp)
 516{
 517	int error;
 518
 519	error = qd_get(sdp, qid, qdp);
 520	if (error)
 521		return error;
 522
 523	error = slot_get(*qdp);
 524	if (error)
 525		goto fail;
 526
 527	error = bh_get(*qdp);
 528	if (error)
 529		goto fail_slot;
 530
 531	return 0;
 532
 533fail_slot:
 534	slot_put(*qdp);
 535fail:
 536	qd_put(*qdp);
 537	return error;
 538}
 539
 540/**
  541 * gfs2_qa_get - make sure we have a quota allocation data structure,
 542 *               if necessary
 543 * @ip: the inode for this reservation
 544 */
 545int gfs2_qa_get(struct gfs2_inode *ip)
 546{
 547	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 548	struct inode *inode = &ip->i_inode;
 549
 550	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 551		return 0;
 552
 553	spin_lock(&inode->i_lock);
 554	if (ip->i_qadata == NULL) {
 555		struct gfs2_qadata *tmp;
 556
 557		spin_unlock(&inode->i_lock);
 558		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 559		if (!tmp)
 560			return -ENOMEM;
 561
 562		spin_lock(&inode->i_lock);
 563		if (ip->i_qadata == NULL)
 564			ip->i_qadata = tmp;
 565		else
 566			kmem_cache_free(gfs2_qadata_cachep, tmp);
 567	}
 568	ip->i_qadata->qa_ref++;
 569	spin_unlock(&inode->i_lock);
 570	return 0;
 571}
 572
 573void gfs2_qa_put(struct gfs2_inode *ip)
 574{
 575	struct inode *inode = &ip->i_inode;
 576
 577	spin_lock(&inode->i_lock);
 578	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 579		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 580		ip->i_qadata = NULL;
 581	}
 582	spin_unlock(&inode->i_lock);
 583}
 584
 585int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 586{
 587	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 588	struct gfs2_quota_data **qd;
 589	int error;
 590
 591	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 592		return 0;
 593
 594	error = gfs2_qa_get(ip);
 595	if (error)
 596		return error;
 597
 598	qd = ip->i_qadata->qa_qd;
 599
 600	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 601	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 602		error = -EIO;
 603		gfs2_qa_put(ip);
 604		goto out;
 605	}
 606
 607	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 608	if (error)
 609		goto out_unhold;
 610	ip->i_qadata->qa_qd_num++;
 611	qd++;
 612
 613	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 614	if (error)
 615		goto out_unhold;
 616	ip->i_qadata->qa_qd_num++;
 617	qd++;
 618
 619	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 620	    !uid_eq(uid, ip->i_inode.i_uid)) {
 621		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 622		if (error)
 623			goto out_unhold;
 624		ip->i_qadata->qa_qd_num++;
 625		qd++;
 626	}
 627
 628	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 629	    !gid_eq(gid, ip->i_inode.i_gid)) {
 630		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 631		if (error)
 632			goto out_unhold;
 633		ip->i_qadata->qa_qd_num++;
 634		qd++;
 635	}
 636
 637out_unhold:
 638	if (error)
 639		gfs2_quota_unhold(ip);
 640out:
 641	return error;
 642}
 643
 644void gfs2_quota_unhold(struct gfs2_inode *ip)
 645{
 646	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 647	u32 x;
 648
 649	if (ip->i_qadata == NULL)
 650		return;
 651
 652	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 653
 654	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 655		qdsb_put(ip->i_qadata->qa_qd[x]);
 656		ip->i_qadata->qa_qd[x] = NULL;
 657	}
 658	ip->i_qadata->qa_qd_num = 0;
 659	gfs2_qa_put(ip);
 660}
 661
 662static int sort_qd(const void *a, const void *b)
 663{
 664	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 665	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 666
 667	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 668		return -1;
 669	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 670		return 1;
 671	return 0;
 672}
 673
 674static void do_qc(struct gfs2_quota_data *qd, s64 change)
 675{
 676	struct gfs2_sbd *sdp = qd->qd_sbd;
 677	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 678	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 679	bool needs_put = false;
 680	s64 x;
 681
 682	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 683
 684	/*
 685	 * The QDF_CHANGE flag indicates that the slot in the quota change file
 686	 * is used.  Here, we use the value of qc->qc_change when the slot is
 687	 * used, and we assume a value of 0 otherwise.
 688	 */
 689
 690	spin_lock(&qd->qd_lockref.lock);
 691
 692	x = 0;
 693	if (test_bit(QDF_CHANGE, &qd->qd_flags))
 694		x = be64_to_cpu(qc->qc_change);
 695	x += change;
 696	qd->qd_change += change;
 697
 698	if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
 699		/* The slot in the quota change file becomes unused. */
 700		clear_bit(QDF_CHANGE, &qd->qd_flags);
 701		qc->qc_flags = 0;
 702		qc->qc_id = 0;
 703		needs_put = true;
 704	} else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 705		/* The slot in the quota change file becomes used. */
 706		set_bit(QDF_CHANGE, &qd->qd_flags);
 707		__qd_hold(qd);
 708		slot_hold(qd);
 709
 710		qc->qc_flags = 0;
 711		if (qd->qd_id.type == USRQUOTA)
 712			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 713		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 714	}
 715	qc->qc_change = cpu_to_be64(x);
 716
 717	spin_unlock(&qd->qd_lockref.lock);
 718
 719	if (needs_put) {
 720		slot_put(qd);
 721		qd_put(qd);
 722	}
 723	if (change < 0) /* Reset quiet flag if we freed some blocks */
 724		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 725}
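/*
 * Worked example for do_qc() above (illustrative values, not from the
 * original source).  Assume the quota-change slot for this ID is currently
 * unused (QDF_CHANGE clear, qc->qc_change == 0):
 *
 *	do_qc(qd, +5):  x = 0 + 5 = 5  -> slot becomes used, QDF_CHANGE is
 *	                set and extra references are taken via __qd_hold()
 *	                and slot_hold()
 *	do_qc(qd, -5):  x = 5 - 5 = 0  -> slot becomes unused again,
 *	                QDF_CHANGE is cleared and the extra references are
 *	                dropped via slot_put() and qd_put()
 */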
 726
 727static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
 728				  unsigned off, void *buf, unsigned bytes)
 729{
 730	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 731	struct inode *inode = &ip->i_inode;
 732	struct address_space *mapping = inode->i_mapping;
 733	struct folio *folio;
 734	struct buffer_head *bh;
 735	u64 blk;
 736	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 737	unsigned to_write = bytes, pg_off = off;
 738
 739	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 740	boff = off % bsize;
 741
 742	folio = filemap_grab_folio(mapping, index);
 743	if (IS_ERR(folio))
 744		return PTR_ERR(folio);
 745	bh = folio_buffers(folio);
 746	if (!bh)
 747		bh = create_empty_buffers(folio, bsize, 0);
 748
 749	for (;;) {
 750		/* Find the beginning block within the folio */
 751		if (pg_off >= ((bnum * bsize) + bsize)) {
 752			bh = bh->b_this_page;
 753			bnum++;
 754			blk++;
 755			continue;
 756		}
 757		if (!buffer_mapped(bh)) {
 758			gfs2_block_map(inode, blk, bh, 1);
 759			if (!buffer_mapped(bh))
 760				goto unlock_out;
 761			/* If it's a newly allocated disk block, zero it */
 762			if (buffer_new(bh))
 763				folio_zero_range(folio, bnum * bsize,
 764						bh->b_size);
 765		}
 766		if (folio_test_uptodate(folio))
 767			set_buffer_uptodate(bh);
 768		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 769			goto unlock_out;
 770		gfs2_trans_add_data(ip->i_gl, bh);
 771
 772		/* If we need to write to the next block as well */
 773		if (to_write > (bsize - boff)) {
 774			pg_off += (bsize - boff);
 775			to_write -= (bsize - boff);
 776			boff = pg_off % bsize;
 777			continue;
 778		}
 779		break;
 780	}
 781
  782	/* Write to the folio, now that we have set up the buffer(s) */
 783	memcpy_to_folio(folio, off, buf, bytes);
 784	flush_dcache_folio(folio);
 785	folio_unlock(folio);
 786	folio_put(folio);
 787
 788	return 0;
 789
 790unlock_out:
 791	folio_unlock(folio);
 792	folio_put(folio);
 793	return -EIO;
 794}
 795
 796static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
 797				 loff_t loc)
 798{
 799	unsigned long pg_beg;
 800	unsigned pg_off, nbytes, overflow = 0;
 801	int error;
 802	void *ptr;
 803
 804	nbytes = sizeof(struct gfs2_quota);
 805
 806	pg_beg = loc >> PAGE_SHIFT;
 807	pg_off = offset_in_page(loc);
 808
 809	/* If the quota straddles a page boundary, split the write in two */
 810	if ((pg_off + nbytes) > PAGE_SIZE)
 811		overflow = (pg_off + nbytes) - PAGE_SIZE;
 812
 813	ptr = qp;
 814	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
 815				       nbytes - overflow);
 816	/* If there's an overflow, write the remaining bytes to the next page */
 817	if (!error && overflow)
 818		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
 819					       ptr + nbytes - overflow,
 820					       overflow);
 821	return error;
 822}
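/*
 * Illustrative arithmetic for the page-straddle split in
 * gfs2_write_disk_quota() above (hypothetical numbers, not from the
 * original source).  With a 4096-byte PAGE_SIZE, taking
 * sizeof(struct gfs2_quota) as 88 bytes for the sake of the example, and
 * loc landing at byte 4060 of its page:
 *
 *	pg_off   = 4060
 *	overflow = (4060 + 88) - 4096 = 52
 *
 * so 36 bytes go to offset 4060 of page pg_beg and the remaining 52 bytes
 * to offset 0 of page pg_beg + 1.
 */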
 823
 824/**
 825 * gfs2_adjust_quota - adjust record of current block usage
 826 * @sdp: The superblock
 827 * @loc: Offset of the entry in the quota file
 828 * @change: The amount of usage change to record
 829 * @qd: The quota data
 830 * @fdq: The updated limits to record
 831 *
 832 * This function was mostly borrowed from gfs2_block_truncate_page which was
 833 * in turn mostly borrowed from ext3
 834 *
 835 * Returns: 0 or -ve on error
 836 */
 837
 838static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 839			     s64 change, struct gfs2_quota_data *qd,
 840			     struct qc_dqblk *fdq)
 841{
 842	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 843	struct inode *inode = &ip->i_inode;
 844	struct gfs2_quota q;
 845	int err;
 846	u64 size;
 847
 848	if (gfs2_is_stuffed(ip)) {
 849		err = gfs2_unstuff_dinode(ip);
 850		if (err)
 851			return err;
 852	}
 853
 854	memset(&q, 0, sizeof(struct gfs2_quota));
 855	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 856	if (err < 0)
 857		return err;
 858
 859	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 860	be64_add_cpu(&q.qu_value, change);
 861	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 862		q.qu_value = 0; /* Never go negative on quota usage */
 863	spin_lock(&qd->qd_lockref.lock);
 864	qd->qd_qb.qb_value = q.qu_value;
 865	if (fdq) {
 866		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 867			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 868			qd->qd_qb.qb_warn = q.qu_warn;
 869		}
 870		if (fdq->d_fieldmask & QC_SPC_HARD) {
 871			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 872			qd->qd_qb.qb_limit = q.qu_limit;
 873		}
 874		if (fdq->d_fieldmask & QC_SPACE) {
 875			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 876			qd->qd_qb.qb_value = q.qu_value;
 877		}
 878	}
 879	spin_unlock(&qd->qd_lockref.lock);
 880
 881	err = gfs2_write_disk_quota(sdp, &q, loc);
 882	if (!err) {
 883		size = loc + sizeof(struct gfs2_quota);
 884		if (size > inode->i_size)
 885			i_size_write(inode, size);
 886		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 887		mark_inode_dirty(inode);
 888		set_bit(QDF_REFRESH, &qd->qd_flags);
 889	}
 890
 891	return err;
 892}
 893
 894static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda,
 895		   u64 sync_gen)
 896{
 897	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
 898	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 899	struct gfs2_alloc_parms ap = {};
 900	unsigned int data_blocks, ind_blocks;
 901	struct gfs2_holder *ghs, i_gh;
 902	unsigned int qx, x;
 903	struct gfs2_quota_data *qd;
 904	unsigned reserved;
 905	loff_t offset;
 906	unsigned int nalloc = 0, blocks;
 907	int error;
 908
 909	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 910			      &data_blocks, &ind_blocks);
 911
 912	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 913	if (!ghs)
 914		return -ENOMEM;
 915
 916	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 917	inode_lock(&ip->i_inode);
 918	for (qx = 0; qx < num_qd; qx++) {
 919		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 920					   GL_NOCACHE, &ghs[qx]);
 921		if (error)
 922			goto out_dq;
 923	}
 924
 925	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 926	if (error)
 927		goto out_dq;
 928
 929	for (x = 0; x < num_qd; x++) {
 930		offset = qd2offset(qda[x]);
 931		if (gfs2_write_alloc_required(ip, offset,
 932					      sizeof(struct gfs2_quota)))
 933			nalloc++;
 934	}
 935
 936	/* 
 937	 * 1 blk for unstuffing inode if stuffed. We add this extra
 938	 * block to the reservation unconditionally. If the inode
 939	 * doesn't need unstuffing, the block will be released to the 
 940	 * rgrp since it won't be allocated during the transaction
 941	 */
 942	/* +3 in the end for unstuffing block, inode size update block
 943	 * and another block in case quota straddles page boundary and 
 944	 * two blocks need to be updated instead of 1 */
 945	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 946
 947	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 948	ap.target = reserved;
 949	error = gfs2_inplace_reserve(ip, &ap);
 950	if (error)
 951		goto out_alloc;
 952
 953	if (nalloc)
 954		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 955
 956	error = gfs2_trans_begin(sdp, blocks, 0);
 957	if (error)
 958		goto out_ipres;
 959
 960	for (x = 0; x < num_qd; x++) {
 961		qd = qda[x];
 962		offset = qd2offset(qd);
 963		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
 964							NULL);
 965		if (error)
 966			goto out_end_trans;
 967
 968		do_qc(qd, -qd->qd_change_sync);
 969		set_bit(QDF_REFRESH, &qd->qd_flags);
 970	}
 971
 972out_end_trans:
 973	gfs2_trans_end(sdp);
 974out_ipres:
 975	gfs2_inplace_release(ip);
 976out_alloc:
 977	gfs2_glock_dq_uninit(&i_gh);
 978out_dq:
 979	while (qx--)
 980		gfs2_glock_dq_uninit(&ghs[qx]);
 981	inode_unlock(&ip->i_inode);
 982	kfree(ghs);
 983	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
 984		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
 985	if (!error) {
 986		for (x = 0; x < num_qd; x++) {
 987			qd = qda[x];
 988			spin_lock(&qd->qd_lockref.lock);
 989			if (qd->qd_sync_gen < sync_gen)
 990				qd->qd_sync_gen = sync_gen;
 991			spin_unlock(&qd->qd_lockref.lock);
 992		}
 993	}
 994	return error;
 995}
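/*
 * Illustrative reservation arithmetic for do_sync() above (hypothetical
 * numbers, not from the original source).  Syncing num_qd = 2 entries with
 * data_blocks = 1 and ind_blocks = 1 per entry, where nalloc = 1 entry
 * needs new allocation:
 *
 *	blocks   = 2 * 1 + RES_DINODE + 2 + 3
 *	reserved = 1 + 1 * (1 + 1) = 3
 *	blocks  += gfs2_rg_blocks(ip, 3) + 1 * 1 + RES_STATFS
 *
 * The last line only applies because nalloc is non-zero.
 */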
 996
 997static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 998{
 999	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1000	struct gfs2_quota q;
1001	struct gfs2_quota_lvb *qlvb;
1002	loff_t pos;
1003	int error;
1004
1005	memset(&q, 0, sizeof(struct gfs2_quota));
1006	pos = qd2offset(qd);
1007	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
1008	if (error < 0)
1009		return error;
1010
1011	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1012	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1013	qlvb->__pad = 0;
1014	qlvb->qb_limit = q.qu_limit;
1015	qlvb->qb_warn = q.qu_warn;
1016	qlvb->qb_value = q.qu_value;
1017	spin_lock(&qd->qd_lockref.lock);
1018	qd->qd_qb = *qlvb;
1019	spin_unlock(&qd->qd_lockref.lock);
1020
1021	return 0;
1022}
1023
1024static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1025		    struct gfs2_holder *q_gh)
1026{
1027	struct gfs2_sbd *sdp = qd->qd_sbd;
1028	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1029	struct gfs2_holder i_gh;
1030	int error;
1031
1032	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
1033restart:
1034	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1035	if (error)
1036		return error;
1037
1038	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1039		force_refresh = FORCE;
1040
1041	spin_lock(&qd->qd_lockref.lock);
1042	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1043	spin_unlock(&qd->qd_lockref.lock);
1044
1045	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1046		gfs2_glock_dq_uninit(q_gh);
1047		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1048					   GL_NOCACHE, q_gh);
1049		if (error)
1050			return error;
1051
1052		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1053		if (error)
1054			goto fail;
1055
1056		error = update_qd(sdp, qd);
1057		if (error)
1058			goto fail_gunlock;
1059
1060		gfs2_glock_dq_uninit(&i_gh);
1061		gfs2_glock_dq_uninit(q_gh);
1062		force_refresh = 0;
1063		goto restart;
1064	}
1065
1066	return 0;
1067
1068fail_gunlock:
1069	gfs2_glock_dq_uninit(&i_gh);
1070fail:
1071	gfs2_glock_dq_uninit(q_gh);
1072	return error;
1073}
1074
1075int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1076{
1077	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1078	struct gfs2_quota_data *qd;
1079	u32 x;
1080	int error;
1081
1082	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1083		return 0;
1084
1085	error = gfs2_quota_hold(ip, uid, gid);
1086	if (error)
1087		return error;
1088
1089	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1090	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1091
1092	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1093		qd = ip->i_qadata->qa_qd[x];
1094		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1095		if (error)
1096			break;
1097	}
1098
1099	if (!error)
1100		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1101	else {
1102		while (x--)
1103			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1104		gfs2_quota_unhold(ip);
1105	}
1106
1107	return error;
1108}
1109
1110static bool need_sync(struct gfs2_quota_data *qd)
1111{
1112	struct gfs2_sbd *sdp = qd->qd_sbd;
1113	struct gfs2_tune *gt = &sdp->sd_tune;
1114	s64 value, change, limit;
1115	unsigned int num, den;
1116	int ret = false;
1117
1118	spin_lock(&qd->qd_lockref.lock);
1119	if (!qd->qd_qb.qb_limit)
1120		goto out;
1121
1122	change = qd->qd_change;
1123	if (change <= 0)
1124		goto out;
1125	value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1126	limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1127	if (value >= limit)
1128		goto out;
1129
1130	spin_lock(&gt->gt_spin);
1131	num = gt->gt_quota_scale_num;
1132	den = gt->gt_quota_scale_den;
1133	spin_unlock(&gt->gt_spin);
1134
1135	change *= gfs2_jindex_size(sdp) * num;
1136	change = div_s64(change, den);
1137	if (value + change < limit)
1138		goto out;
1139
1140	ret = true;
1141out:
1142	spin_unlock(&qd->qd_lockref.lock);
1143	return ret;
1144}
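/*
 * Worked example for need_sync() above (illustrative values only, not from
 * the original source).  Suppose value = 900 blocks, limit = 1000 blocks,
 * the locally pending change = 40 blocks, gfs2_jindex_size(sdp) = 4
 * journals and a scale factor num/den of 1/1:
 *
 *	projection = value + change * 4 * 1 / 1 = 900 + 160 = 1060 >= limit
 *
 * so need_sync() returns true and the caller schedules a quota sync.  With
 * change = 20 the projection is 980 < 1000 and no sync is needed yet.
 */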
1145
1146void gfs2_quota_unlock(struct gfs2_inode *ip)
1147{
1148	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1149	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
1150	unsigned int count = 0;
1151	u32 x;
1152
1153	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1154		return;
1155
1156	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1157		struct gfs2_quota_data *qd;
1158		bool sync;
1159		int error;
1160
1161		qd = ip->i_qadata->qa_qd[x];
1162		sync = need_sync(qd);
1163
1164		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1165		if (!sync)
1166			continue;
1167
1168		spin_lock(&qd_lock);
1169		sync = qd_grab_sync(sdp, qd, U64_MAX);
1170		spin_unlock(&qd_lock);
1171
1172		if (!sync)
1173			continue;
1174
1175		gfs2_assert_warn(sdp, qd->qd_change_sync);
1176		error = bh_get(qd);
1177		if (error) {
1178			qd_ungrab_sync(qd);
1179			continue;
1180		}
1181
1182		qda[count++] = qd;
1183	}
1184
1185	if (count) {
1186		u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen);
1187
1188		do_sync(count, qda, sync_gen);
1189		for (x = 0; x < count; x++)
1190			qd_unlock(qda[x]);
1191	}
1192
1193	gfs2_quota_unhold(ip);
1194}
1195
1196#define MAX_LINE 256
1197
1198static void print_message(struct gfs2_quota_data *qd, char *type)
1199{
1200	struct gfs2_sbd *sdp = qd->qd_sbd;
1201
1202	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
1203		fs_info(sdp, "quota %s for %s %u\n",
1204			type,
1205			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1206			from_kqid(&init_user_ns, qd->qd_id));
1207	}
1208}
1209
1210/**
1211 * gfs2_quota_check - check if allocating new blocks will exceed quota
1212 * @ip:  The inode for which this check is being performed
1213 * @uid: The uid to check against
1214 * @gid: The gid to check against
1215 * @ap:  The allocation parameters. ap->target contains the requested
1216 *       blocks. ap->min_target, if set, contains the minimum blks
1217 *       requested.
1218 *
1219 * Returns: 0 on success.
1220 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1221 *                  quota must allow at least min_req blks for success and
1222 *                  ap->allowed is set to the number of blocks allowed
1223 *
1224 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1225 *                  of blocks available.
1226 */
1227int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1228		     struct gfs2_alloc_parms *ap)
1229{
1230	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1231	struct gfs2_quota_data *qd;
1232	s64 value, warn, limit;
1233	u32 x;
1234	int error = 0;
1235
1236	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1237	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1238		return 0;
1239
1240	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1241		qd = ip->i_qadata->qa_qd[x];
1242
1243		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1244		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1245			continue;
1246
1247		spin_lock(&qd->qd_lockref.lock);
1248		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1249		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1250		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1251		value += qd->qd_change;
1252		spin_unlock(&qd->qd_lockref.lock);
1253
1254		if (limit > 0 && (limit - value) < ap->allowed)
1255			ap->allowed = limit - value;
1256		/* If we can't meet the target */
1257		if (limit && limit < (value + (s64)ap->target)) {
1258			/* If no min_target specified or we don't meet
1259			 * min_target, return -EDQUOT */
1260			if (!ap->min_target || ap->min_target > ap->allowed) {
1261				if (!test_and_set_bit(QDF_QMSG_QUIET,
1262						      &qd->qd_flags)) {
1263					print_message(qd, "exceeded");
1264					quota_send_warning(qd->qd_id,
1265							   sdp->sd_vfs->s_dev,
1266							   QUOTA_NL_BHARDWARN);
1267				}
1268				error = -EDQUOT;
1269				break;
1270			}
1271		} else if (warn && warn < value &&
1272			   time_after_eq(jiffies, qd->qd_last_warn +
1273					 gfs2_tune_get(sdp, gt_quota_warn_period)
1274					 * HZ)) {
1275			quota_send_warning(qd->qd_id,
1276					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1277			print_message(qd, "warning");
1278			error = 0;
1279			qd->qd_last_warn = jiffies;
1280		}
1281	}
1282	return error;
1283}
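/*
 * Worked example for gfs2_quota_check() above (illustrative values only,
 * not from the original source).  Suppose limit = 1000 blocks, the current
 * value (including the pending qd_change) is 990, and ap->target = 20:
 *
 *	ap->allowed is capped at limit - value = 10
 *	- with ap->min_target = 5:  5 <= 10, so 0 is returned and the caller
 *	  may allocate up to ap->allowed blocks
 *	- with ap->min_target = 0:  the request cannot be satisfied, so
 *	  -EDQUOT is returned (and a hard-limit warning is sent the first
 *	  time, when the quiet flag is not yet set)
 */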
1284
1285void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1286		       kuid_t uid, kgid_t gid)
1287{
1288	struct gfs2_quota_data *qd;
1289	u32 x;
1290	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1291
1292	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
1293	    gfs2_assert_warn(sdp, change))
1294		return;
1295	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1296		return;
1297
1298	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1299				 ip->i_qadata->qa_ref > 0))
1300		return;
1301	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1302		qd = ip->i_qadata->qa_qd[x];
1303
1304		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1305		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1306			do_qc(qd, change);
1307		}
1308	}
1309}
1310
1311int gfs2_quota_sync(struct super_block *sb, int type)
1312{
1313	struct gfs2_sbd *sdp = sb->s_fs_info;
1314	struct gfs2_quota_data **qda;
1315	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1316	u64 sync_gen;
1317	int error = 0;
1318
1319	if (sb_rdonly(sdp->sd_vfs))
1320		return 0;
1321
1322	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1323	if (!qda)
1324		return -ENOMEM;
1325
1326	mutex_lock(&sdp->sd_quota_sync_mutex);
1327	sync_gen = sdp->sd_quota_sync_gen + 1;
1328
1329	do {
1330		struct gfs2_quota_data *iter;
1331		unsigned int num_qd = 0;
1332		unsigned int x;
1333
1334		spin_lock(&qd_lock);
1335		list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
1336			if (qd_grab_sync(sdp, iter, sync_gen)) {
1337				qda[num_qd++] = iter;
1338				if (num_qd == max_qd)
1339					break;
1340			}
1341		}
1342		spin_unlock(&qd_lock);
1343
1344		if (!num_qd)
1345			break;
1346
1347		for (x = 0; x < num_qd; x++) {
1348			error = bh_get(qda[x]);
1349			if (!error)
1350				continue;
1351
1352			while (x < num_qd)
1353				qd_ungrab_sync(qda[--num_qd]);
1354			break;
1355		}
1356
1357		if (!error) {
1358			WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen);
1359			error = do_sync(num_qd, qda, sync_gen);
1360		}
1361
1362		for (x = 0; x < num_qd; x++)
1363			qd_unlock(qda[x]);
1364	} while (!error);
1365
1366	mutex_unlock(&sdp->sd_quota_sync_mutex);
1367	kfree(qda);
1368
1369	return error;
1370}
1371
1372int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1373{
1374	struct gfs2_quota_data *qd;
1375	struct gfs2_holder q_gh;
1376	int error;
1377
1378	error = qd_get(sdp, qid, &qd);
1379	if (error)
1380		return error;
1381
1382	error = do_glock(qd, FORCE, &q_gh);
1383	if (!error)
1384		gfs2_glock_dq_uninit(&q_gh);
1385
1386	qd_put(qd);
1387	return error;
1388}
1389
1390int gfs2_quota_init(struct gfs2_sbd *sdp)
1391{
1392	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1393	u64 size = i_size_read(sdp->sd_qc_inode);
1394	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1395	unsigned int x, slot = 0;
1396	unsigned int found = 0;
1397	unsigned int hash;
1398	unsigned int bm_size;
1399	struct buffer_head *bh;
1400	u64 dblock;
1401	u32 extlen = 0;
1402	int error;
1403
1404	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1405		return -EIO;
1406
1407	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1408	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1409	bm_size *= sizeof(unsigned long);
1410	error = -ENOMEM;
1411	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1412	if (sdp->sd_quota_bitmap == NULL)
1413		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1414						 __GFP_ZERO);
1415	if (!sdp->sd_quota_bitmap)
1416		return error;
1417
1418	for (x = 0; x < blocks; x++) {
1419		struct gfs2_quota_change *qc;
1420		unsigned int y;
1421
1422		if (!extlen) {
1423			extlen = 32;
1424			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1425			if (error)
1426				goto fail;
1427		}
1428		error = -EIO;
1429		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1430		if (!bh)
1431			goto fail;
1432		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
1433			goto fail_brelse;
1434
1435		qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1436		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1437		     y++, slot++) {
1438			struct gfs2_quota_data *old_qd, *qd;
1439			s64 qc_change = be64_to_cpu(qc->qc_change);
1440			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1441			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1442						USRQUOTA : GRPQUOTA;
1443			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1444						      be32_to_cpu(qc->qc_id));
1445			qc++;
1446			if (!qc_change)
1447				continue;
1448
1449			hash = gfs2_qd_hash(sdp, qc_id);
1450			qd = qd_alloc(hash, sdp, qc_id);
1451			if (qd == NULL)
1452				goto fail_brelse;
1453
1454			set_bit(QDF_CHANGE, &qd->qd_flags);
1455			qd->qd_change = qc_change;
1456			qd->qd_slot = slot;
1457			qd->qd_slot_ref = 1;
1458
1459			spin_lock(&qd_lock);
1460			spin_lock_bucket(hash);
1461			old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id);
1462			if (old_qd) {
 1463				fs_err(sdp, "Corruption found in quota_change%u "
1464					    "file: duplicate identifier in "
1465					    "slot %u\n",
1466					    sdp->sd_jdesc->jd_jid, slot);
1467
1468				spin_unlock_bucket(hash);
1469				spin_unlock(&qd_lock);
1470				qd_put(old_qd);
1471
1472				gfs2_glock_put(qd->qd_gl);
1473				kmem_cache_free(gfs2_quotad_cachep, qd);
1474
1475				/* zero out the duplicate slot */
1476				lock_buffer(bh);
1477				memset(qc, 0, sizeof(*qc));
1478				mark_buffer_dirty(bh);
1479				unlock_buffer(bh);
1480
1481				continue;
1482			}
1483			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1484			list_add(&qd->qd_list, &sdp->sd_quota_list);
1485			atomic_inc(&sdp->sd_quota_count);
1486			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1487			spin_unlock_bucket(hash);
1488			spin_unlock(&qd_lock);
1489
1490			found++;
1491		}
1492
1493		if (buffer_dirty(bh))
1494			sync_dirty_buffer(bh);
1495		brelse(bh);
1496		dblock++;
1497		extlen--;
1498	}
1499
1500	if (found)
1501		fs_info(sdp, "found %u quota changes\n", found);
1502
1503	return 0;
1504
1505fail_brelse:
1506	if (buffer_dirty(bh))
1507		sync_dirty_buffer(bh);
1508	brelse(bh);
1509fail:
1510	gfs2_quota_cleanup(sdp);
1511	return error;
1512}
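/*
 * Illustrative sizing example for the quota-slot bitmap allocated in
 * gfs2_quota_init() above (hypothetical numbers, not from the original
 * source).  With sd_quota_slots = 1000 on a 64-bit machine:
 *
 *	bm_size = DIV_ROUND_UP(1000, 8 * sizeof(unsigned long));	= 16
 *	bm_size *= sizeof(unsigned long);				= 128 bytes
 *
 * i.e. the bitmap is rounded up to a whole number of unsigned longs;
 * kzalloc() is tried first, with a __vmalloc() fallback for large bitmaps.
 */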
1513
1514void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1515{
1516	struct gfs2_quota_data *qd;
1517	LIST_HEAD(dispose);
1518	int count;
1519
1520	BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
1521		test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
1522
1523	spin_lock(&qd_lock);
1524	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1525		spin_lock(&qd->qd_lockref.lock);
1526		if (qd->qd_lockref.count != 0) {
1527			spin_unlock(&qd->qd_lockref.lock);
1528			continue;
1529		}
1530		lockref_mark_dead(&qd->qd_lockref);
1531		spin_unlock(&qd->qd_lockref.lock);
1532
1533		list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
1534		list_add(&qd->qd_lru, &dispose);
1535	}
1536	spin_unlock(&qd_lock);
1537
1538	gfs2_qd_list_dispose(&dispose);
1539
1540	wait_event_timeout(sdp->sd_kill_wait,
1541		(count = atomic_read(&sdp->sd_quota_count)) == 0,
1542		HZ * 60);
1543
1544	if (count != 0)
1545		fs_err(sdp, "%d left-over quota data objects\n", count);
1546
1547	kvfree(sdp->sd_quota_bitmap);
1548	sdp->sd_quota_bitmap = NULL;
1549}
1550
1551static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1552{
1553	if (error == 0 || error == -EROFS)
1554		return;
1555	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
1556		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1557			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1558		wake_up(&sdp->sd_logd_waitq);
1559	}
1560}
1561
1562static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1563			       int (*fxn)(struct super_block *sb, int type),
1564			       unsigned long t, unsigned long *timeo,
1565			       unsigned int *new_timeo)
1566{
1567	if (t >= *timeo) {
1568		int error = fxn(sdp->sd_vfs, 0);
1569		quotad_error(sdp, msg, error);
1570		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1571	} else {
1572		*timeo -= t;
1573	}
1574}
1575
1576void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1577	if (!sdp->sd_statfs_force_sync) {
1578		sdp->sd_statfs_force_sync = 1;
1579		wake_up(&sdp->sd_quota_wait);
1580	}
1581}
1582
1583
1584/**
1585 * gfs2_quotad - Write cached quota changes into the quota file
1586 * @data: Pointer to GFS2 superblock
1587 *
1588 */
1589
1590int gfs2_quotad(void *data)
1591{
1592	struct gfs2_sbd *sdp = data;
1593	struct gfs2_tune *tune = &sdp->sd_tune;
1594	unsigned long statfs_timeo = 0;
1595	unsigned long quotad_timeo = 0;
1596	unsigned long t = 0;
1597
1598	set_freezable();
1599	while (!kthread_should_stop()) {
1600		if (gfs2_withdrawing_or_withdrawn(sdp))
1601			break;
1602
1603		/* Update the master statfs file */
1604		if (sdp->sd_statfs_force_sync) {
1605			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1606			quotad_error(sdp, "statfs", error);
1607			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1608		}
1609		else
1610			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1611				   	   &statfs_timeo,
1612					   &tune->gt_statfs_quantum);
1613
1614		/* Update quota file */
1615		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1616				   &quotad_timeo, &tune->gt_quota_quantum);
1617
1618		t = min(quotad_timeo, statfs_timeo);
1619
1620		t = wait_event_freezable_timeout(sdp->sd_quota_wait,
1621				sdp->sd_statfs_force_sync ||
1622				gfs2_withdrawing_or_withdrawn(sdp) ||
1623				kthread_should_stop(),
1624				t);
1625
1626		if (sdp->sd_statfs_force_sync)
1627			t = 0;
1628	}
1629
1630	return 0;
1631}
1632
1633static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1634{
1635	struct gfs2_sbd *sdp = sb->s_fs_info;
1636
1637	memset(state, 0, sizeof(*state));
1638
1639	switch (sdp->sd_args.ar_quota) {
1640	case GFS2_QUOTA_QUIET:
1641		fallthrough;
1642	case GFS2_QUOTA_ON:
1643		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1644		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1645		fallthrough;
1646	case GFS2_QUOTA_ACCOUNT:
1647		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1648						  QCI_SYSFILE;
1649		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1650						  QCI_SYSFILE;
1651		break;
1652	case GFS2_QUOTA_OFF:
1653		break;
1654	}
1655	if (sdp->sd_quota_inode) {
1656		state->s_state[USRQUOTA].ino =
1657					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1658		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1659	}
1660	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1661	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1662	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1663	return 0;
1664}
1665
1666static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1667			  struct qc_dqblk *fdq)
1668{
1669	struct gfs2_sbd *sdp = sb->s_fs_info;
1670	struct gfs2_quota_lvb *qlvb;
1671	struct gfs2_quota_data *qd;
1672	struct gfs2_holder q_gh;
1673	int error;
1674
1675	memset(fdq, 0, sizeof(*fdq));
1676
1677	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1678		return -ESRCH; /* Crazy XFS error code */
1679
1680	if ((qid.type != USRQUOTA) &&
1681	    (qid.type != GRPQUOTA))
1682		return -EINVAL;
1683
1684	error = qd_get(sdp, qid, &qd);
1685	if (error)
1686		return error;
1687	error = do_glock(qd, FORCE, &q_gh);
1688	if (error)
1689		goto out;
1690
1691	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1692	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1693	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1694	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1695
1696	gfs2_glock_dq_uninit(&q_gh);
1697out:
1698	qd_put(qd);
1699	return error;
1700}
1701
1702/* GFS2 only supports a subset of the XFS fields */
1703#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1704
1705static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1706			  struct qc_dqblk *fdq)
1707{
1708	struct gfs2_sbd *sdp = sb->s_fs_info;
1709	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1710	struct gfs2_quota_data *qd;
1711	struct gfs2_holder q_gh, i_gh;
1712	unsigned int data_blocks, ind_blocks;
1713	unsigned int blocks = 0;
1714	int alloc_required;
1715	loff_t offset;
1716	int error;
1717
1718	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1719		return -ESRCH; /* Crazy XFS error code */
1720
1721	if ((qid.type != USRQUOTA) &&
1722	    (qid.type != GRPQUOTA))
1723		return -EINVAL;
1724
1725	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1726		return -EINVAL;
1727
1728	error = qd_get(sdp, qid, &qd);
1729	if (error)
1730		return error;
1731
1732	error = gfs2_qa_get(ip);
1733	if (error)
1734		goto out_put;
1735
1736	inode_lock(&ip->i_inode);
1737	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1738	if (error)
1739		goto out_unlockput;
1740	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1741	if (error)
1742		goto out_q;
1743
1744	/* Check for existing entry, if none then alloc new blocks */
1745	error = update_qd(sdp, qd);
1746	if (error)
1747		goto out_i;
1748
1749	/* If nothing has changed, this is a no-op */
1750	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1751	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1752		fdq->d_fieldmask ^= QC_SPC_SOFT;
1753
1754	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1755	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1756		fdq->d_fieldmask ^= QC_SPC_HARD;
1757
1758	if ((fdq->d_fieldmask & QC_SPACE) &&
1759	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1760		fdq->d_fieldmask ^= QC_SPACE;
1761
1762	if (fdq->d_fieldmask == 0)
1763		goto out_i;
1764
1765	offset = qd2offset(qd);
1766	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1767	if (gfs2_is_stuffed(ip))
1768		alloc_required = 1;
1769	if (alloc_required) {
1770		struct gfs2_alloc_parms ap = {};
1771		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1772				       &data_blocks, &ind_blocks);
1773		blocks = 1 + data_blocks + ind_blocks;
1774		ap.target = blocks;
1775		error = gfs2_inplace_reserve(ip, &ap);
1776		if (error)
1777			goto out_i;
1778		blocks += gfs2_rg_blocks(ip, blocks);
1779	}
1780
1781	/* Some quotas span block boundaries and can update two blocks,
1782	   adding an extra block to the transaction to handle such quotas */
1783	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1784	if (error)
1785		goto out_release;
1786
1787	/* Apply changes */
1788	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
1789	if (!error)
1790		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1791
1792	gfs2_trans_end(sdp);
1793out_release:
1794	if (alloc_required)
1795		gfs2_inplace_release(ip);
1796out_i:
1797	gfs2_glock_dq_uninit(&i_gh);
1798out_q:
1799	gfs2_glock_dq_uninit(&q_gh);
1800out_unlockput:
1801	gfs2_qa_put(ip);
1802	inode_unlock(&ip->i_inode);
1803out_put:
1804	qd_put(qd);
1805	return error;
1806}
1807
1808const struct quotactl_ops gfs2_quotactl_ops = {
1809	.quota_sync     = gfs2_quota_sync,
1810	.get_state	= gfs2_quota_get_state,
1811	.get_dqblk	= gfs2_get_dqblk,
1812	.set_dqblk	= gfs2_set_dqblk,
1813};
1814
1815void __init gfs2_quota_hash_init(void)
1816{
1817	unsigned i;
1818
1819	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1820		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1821}