fs/gfs2/quota.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
   14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
   18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
  21 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
   26 * (with infinitely many nodes, each with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
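/*
 * Worked example (illustrative numbers only): with a hard limit of 1000
 * blocks, 900 blocks already recorded in the quota file, a pending local
 * change of +30 blocks, 4 journals (nodes) in the cluster and the default
 * quota_scale of 1/1, need_sync() below projects 900 + 30 * 4 * 1 / 1 =
 * 1020 >= 1000 and requests an immediate sync; with a quota_scale of 1/2
 * the projection is 900 + 60 = 960 and the sync is deferred.
 */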
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
  85static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  86				 const struct kqid qid)
  87{
  88	unsigned int h;
  89
  90	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  91	h = jhash(&qid, sizeof(struct kqid), h);
  92
  93	return h & GFS2_QD_HASH_MASK;
  94}
  95
  96static inline void spin_lock_bucket(unsigned int hash)
  97{
  98        hlist_bl_lock(&qd_hash_table[hash]);
  99}
 100
 101static inline void spin_unlock_bucket(unsigned int hash)
 102{
 103        hlist_bl_unlock(&qd_hash_table[hash]);
 104}
 105
 106static void gfs2_qd_dealloc(struct rcu_head *rcu)
 107{
 108	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 
 
 109	kmem_cache_free(gfs2_quotad_cachep, qd);
 
 
 110}
 111
 112static void gfs2_qd_dispose(struct list_head *list)
 113{
 114	struct gfs2_quota_data *qd;
 115	struct gfs2_sbd *sdp;
 116
 117	while (!list_empty(list)) {
 118		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 119		sdp = qd->qd_gl->gl_name.ln_sbd;
 120
 121		list_del(&qd->qd_lru);
 122
 123		/* Free from the filesystem-specific list */
 124		spin_lock(&qd_lock);
 125		list_del(&qd->qd_list);
 126		spin_unlock(&qd_lock);
 127
 128		spin_lock_bucket(qd->qd_hash);
 129		hlist_bl_del_rcu(&qd->qd_hlist);
 130		spin_unlock_bucket(qd->qd_hash);
 131
 
 132		gfs2_assert_warn(sdp, !qd->qd_change);
 133		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 134		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 135
 136		gfs2_glock_put(qd->qd_gl);
 137		atomic_dec(&sdp->sd_quota_count);
 
 138
 139		/* Delete it from the common reclaim list */
 140		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 141	}
 142}
 143
 144
 145static enum lru_status gfs2_qd_isolate(struct list_head *item,
 146		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 147{
 148	struct list_head *dispose = arg;
 149	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
 
 
 150
 151	if (!spin_trylock(&qd->qd_lockref.lock))
 152		return LRU_SKIP;
 153
 
 154	if (qd->qd_lockref.count == 0) {
 155		lockref_mark_dead(&qd->qd_lockref);
 156		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 
 157	}
 158
 159	spin_unlock(&qd->qd_lockref.lock);
 160	return LRU_REMOVED;
 161}
 162
 163static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 164					 struct shrink_control *sc)
 165{
 166	LIST_HEAD(dispose);
 167	unsigned long freed;
 168
 169	if (!(sc->gfp_mask & __GFP_FS))
 170		return SHRINK_STOP;
 171
 172	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 173				     gfs2_qd_isolate, &dispose);
 174
 175	gfs2_qd_dispose(&dispose);
 176
 177	return freed;
 178}
 179
 180static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 181					  struct shrink_control *sc)
 182{
 183	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 184}
 185
 186struct shrinker gfs2_qd_shrinker = {
 187	.count_objects = gfs2_qd_shrink_count,
 188	.scan_objects = gfs2_qd_shrink_scan,
 189	.seeks = DEFAULT_SEEKS,
 190	.flags = SHRINKER_NUMA_AWARE,
 191};
 192
 193
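/*
 * Index of this ID in the quota file: user and group IDs are interleaved,
 * so user id N maps to index 2*N and group id N to index 2*N + 1.
 */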
 194static u64 qd2index(struct gfs2_quota_data *qd)
 195{
 196	struct kqid qid = qd->qd_id;
 197	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 198		((qid.type == USRQUOTA) ? 0 : 1);
 199}
 200
 201static u64 qd2offset(struct gfs2_quota_data *qd)
 202{
 203	u64 offset;
 204
 205	offset = qd2index(qd);
 206	offset *= sizeof(struct gfs2_quota);
 207
 208	return offset;
 209}
 210
 211static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 212{
 213	struct gfs2_quota_data *qd;
 214	int error;
 215
 216	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 217	if (!qd)
 218		return NULL;
 219
 220	qd->qd_sbd = sdp;
 221	qd->qd_lockref.count = 1;
 222	spin_lock_init(&qd->qd_lockref.lock);
 223	qd->qd_id = qid;
 224	qd->qd_slot = -1;
 225	INIT_LIST_HEAD(&qd->qd_lru);
 226	qd->qd_hash = hash;
 227
 228	error = gfs2_glock_get(sdp, qd2index(qd),
 229			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 230	if (error)
 231		goto fail;
 232
 233	return qd;
 234
 235fail:
 236	kmem_cache_free(gfs2_quotad_cachep, qd);
 237	return NULL;
 238}
 239
 240static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 241						     const struct gfs2_sbd *sdp,
 242						     struct kqid qid)
 243{
 244	struct gfs2_quota_data *qd;
 245	struct hlist_bl_node *h;
 246
 247	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 248		if (!qid_eq(qd->qd_id, qid))
 249			continue;
 250		if (qd->qd_sbd != sdp)
 251			continue;
 252		if (lockref_get_not_dead(&qd->qd_lockref)) {
 253			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 254			return qd;
 255		}
 256	}
 257
 258	return NULL;
 259}
 260
 261
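/*
 * Look up (or create) the gfs2_quota_data for @qid.  The fast path is a
 * lockless RCU search of the hash bucket; on a miss, a new entry is
 * allocated and the search is repeated under qd_lock and the bucket lock,
 * so a racing insertion wins and the spare entry is freed.
 */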
 262static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 263		  struct gfs2_quota_data **qdp)
 264{
 265	struct gfs2_quota_data *qd, *new_qd;
 266	unsigned int hash = gfs2_qd_hash(sdp, qid);
 267
 268	rcu_read_lock();
 269	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 270	rcu_read_unlock();
 271
 272	if (qd)
 273		return 0;
 274
 275	new_qd = qd_alloc(hash, sdp, qid);
 276	if (!new_qd)
 277		return -ENOMEM;
 278
 279	spin_lock(&qd_lock);
 280	spin_lock_bucket(hash);
 281	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 282	if (qd == NULL) {
 
 283		*qdp = new_qd;
 284		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 285		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 286		atomic_inc(&sdp->sd_quota_count);
 287	}
 288	spin_unlock_bucket(hash);
 289	spin_unlock(&qd_lock);
 290
 291	if (qd) {
 292		gfs2_glock_put(new_qd->qd_gl);
 293		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 294	}
 295
 296	return 0;
 297}
 298
 299
 300static void qd_hold(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 303	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 304	lockref_get(&qd->qd_lockref);
 305}
 306
 307static void qd_put(struct gfs2_quota_data *qd)
 308{
 
 
 309	if (lockref_put_or_lock(&qd->qd_lockref))
 310		return;
 311
 312	qd->qd_lockref.count = 0;
 313	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 314	spin_unlock(&qd->qd_lockref.lock);
 315
 316}
 317
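/*
 * Reserve a slot for this ID in the per-node quota_change file.  The first
 * hold claims a free bit in sd_quota_bitmap (-ENOSPC if none is left);
 * subsequent holds only bump qd_slot_count.
 */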
 318static int slot_get(struct gfs2_quota_data *qd)
 319{
 320	struct gfs2_sbd *sdp = qd->qd_sbd;
 321	unsigned int bit;
 322	int error = 0;
 323
 324	spin_lock(&sdp->sd_bitmap_lock);
 325	if (qd->qd_slot_count != 0)
 326		goto out;
 327
 328	error = -ENOSPC;
 329	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
 330	if (bit < sdp->sd_quota_slots) {
 
 331		set_bit(bit, sdp->sd_quota_bitmap);
 332		qd->qd_slot = bit;
 333		error = 0;
 334out:
 335		qd->qd_slot_count++;
 336	}
 
 
 337	spin_unlock(&sdp->sd_bitmap_lock);
 338
 339	return error;
 340}
 341
 342static void slot_hold(struct gfs2_quota_data *qd)
 343{
 344	struct gfs2_sbd *sdp = qd->qd_sbd;
 345
 346	spin_lock(&sdp->sd_bitmap_lock);
 347	gfs2_assert(sdp, qd->qd_slot_count);
 348	qd->qd_slot_count++;
 349	spin_unlock(&sdp->sd_bitmap_lock);
 350}
 351
 352static void slot_put(struct gfs2_quota_data *qd)
 353{
 354	struct gfs2_sbd *sdp = qd->qd_sbd;
 355
 356	spin_lock(&sdp->sd_bitmap_lock);
 357	gfs2_assert(sdp, qd->qd_slot_count);
 358	if (!--qd->qd_slot_count) {
 359		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 360		qd->qd_slot = -1;
 361	}
 362	spin_unlock(&sdp->sd_bitmap_lock);
 363}
 364
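/*
 * Read the quota_change block backing this ID's slot and point qd_bh_qc at
 * its entry.  The buffer is read on the first hold only; qd_bh_count keeps
 * it pinned until the matching bh_put().
 */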
 365static int bh_get(struct gfs2_quota_data *qd)
 366{
 367	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 368	struct inode *inode = sdp->sd_qc_inode;
 369	struct gfs2_inode *ip = GFS2_I(inode);
 370	unsigned int block, offset;
 371	struct buffer_head *bh;
 372	struct iomap iomap = { };
 373	int error;
 374
 375	mutex_lock(&sdp->sd_quota_mutex);
 376
 377	if (qd->qd_bh_count++) {
 378		mutex_unlock(&sdp->sd_quota_mutex);
 379		return 0;
 380	}
 381
 382	block = qd->qd_slot / sdp->sd_qc_per_block;
 383	offset = qd->qd_slot % sdp->sd_qc_per_block;
 384
 385	error = gfs2_iomap_get(inode,
 386			       (loff_t)block << inode->i_blkbits,
 387			       i_blocksize(inode), &iomap);
 388	if (error)
 389		goto fail;
 390	error = -ENOENT;
 391	if (iomap.type != IOMAP_MAPPED)
 392		goto fail;
 393
 394	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 395			       DIO_WAIT, 0, &bh);
 396	if (error)
 397		goto fail;
 398	error = -EIO;
 399	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 400		goto fail_brelse;
 401
 402	qd->qd_bh = bh;
 403	qd->qd_bh_qc = (struct gfs2_quota_change *)
 404		(bh->b_data + sizeof(struct gfs2_meta_header) +
 405		 offset * sizeof(struct gfs2_quota_change));
 406
 407	mutex_unlock(&sdp->sd_quota_mutex);
 408
 409	return 0;
 410
 411fail_brelse:
 412	brelse(bh);
 413fail:
 414	qd->qd_bh_count--;
 415	mutex_unlock(&sdp->sd_quota_mutex);
 416	return error;
 417}
 418
 419static void bh_put(struct gfs2_quota_data *qd)
 420{
 421	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 422
 423	mutex_lock(&sdp->sd_quota_mutex);
 424	gfs2_assert(sdp, qd->qd_bh_count);
 425	if (!--qd->qd_bh_count) {
 426		brelse(qd->qd_bh);
 427		qd->qd_bh = NULL;
 428		qd->qd_bh_qc = NULL;
 429	}
 430	mutex_unlock(&sdp->sd_quota_mutex);
 431}
 432
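/*
 * Check whether @qd should be written back to the quota file.  Returns 1
 * and takes the references needed for the sync (QDF_LOCKED, a lockref, a
 * slot hold) if the entry has a pending change that has not yet been
 * synced in the current generation.
 */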
 433static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 434			 u64 *sync_gen)
 435{
 436	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 437	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 438	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 439		return 0;
 440
 441	if (!lockref_get_not_dead(&qd->qd_lockref))
 442		return 0;
 443
 444	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 445	set_bit(QDF_LOCKED, &qd->qd_flags);
 446	qd->qd_change_sync = qd->qd_change;
 447	slot_hold(qd);
 448	return 1;
 449}
 450
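/*
 * Pull the next quota entry that needs syncing off sd_quota_list and pin
 * its quota_change buffer so do_sync() can write it back.  *qdp is left
 * NULL when nothing is pending.
 */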
 451static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 452{
 453	struct gfs2_quota_data *qd = NULL, *iter;
 454	int error;
 455
 456	*qdp = NULL;
 457
 458	if (sb_rdonly(sdp->sd_vfs))
 459		return 0;
 460
 461	spin_lock(&qd_lock);
 462
 463	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
 464		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
 465			qd = iter;
 466			break;
 467		}
 468	}
 469
 470	spin_unlock(&qd_lock);
 471
 472	if (qd) {
 473		gfs2_assert_warn(sdp, qd->qd_change_sync);
 474		error = bh_get(qd);
 475		if (error) {
 476			clear_bit(QDF_LOCKED, &qd->qd_flags);
 477			slot_put(qd);
 478			qd_put(qd);
 479			return error;
 480		}
 481	}
 482
 483	*qdp = qd;
 484
 485	return 0;
 486}
 487
 488static void qd_unlock(struct gfs2_quota_data *qd)
 489{
 490	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
 491			 test_bit(QDF_LOCKED, &qd->qd_flags));
 492	clear_bit(QDF_LOCKED, &qd->qd_flags);
 493	bh_put(qd);
 494	slot_put(qd);
 495	qd_put(qd);
 496}
 497
 498static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 499		    struct gfs2_quota_data **qdp)
 500{
 501	int error;
 502
 503	error = qd_get(sdp, qid, qdp);
 504	if (error)
 505		return error;
 506
 507	error = slot_get(*qdp);
 508	if (error)
 509		goto fail;
 510
 511	error = bh_get(*qdp);
 512	if (error)
 513		goto fail_slot;
 514
 515	return 0;
 516
 517fail_slot:
 518	slot_put(*qdp);
 519fail:
 520	qd_put(*qdp);
 521	return error;
 522}
 523
 524static void qdsb_put(struct gfs2_quota_data *qd)
 525{
 526	bh_put(qd);
 527	slot_put(qd);
 528	qd_put(qd);
 529}
 530
 531/**
 532 * gfs2_qa_get - make sure we have a quota allocations data structure,
 533 *               if necessary
 534 * @ip: the inode for this reservation
 535 */
 536int gfs2_qa_get(struct gfs2_inode *ip)
 537{
 538	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 539	struct inode *inode = &ip->i_inode;
 540
 541	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 542		return 0;
 543
 544	spin_lock(&inode->i_lock);
 545	if (ip->i_qadata == NULL) {
 546		struct gfs2_qadata *tmp;
 547
 548		spin_unlock(&inode->i_lock);
 549		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 550		if (!tmp)
 551			return -ENOMEM;
 552
 553		spin_lock(&inode->i_lock);
 554		if (ip->i_qadata == NULL)
 555			ip->i_qadata = tmp;
 556		else
 557			kmem_cache_free(gfs2_qadata_cachep, tmp);
 558	}
 559	ip->i_qadata->qa_ref++;
 560	spin_unlock(&inode->i_lock);
 561	return 0;
 562}
 563
 564void gfs2_qa_put(struct gfs2_inode *ip)
 565{
 566	struct inode *inode = &ip->i_inode;
 567
 568	spin_lock(&inode->i_lock);
 569	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 570		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 571		ip->i_qadata = NULL;
 572	}
 573	spin_unlock(&inode->i_lock);
 574}
 575
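/*
 * Attach the quota_data entries this operation may touch to the inode's
 * qadata: the inode's owner and group, plus the new uid and/or gid when
 * ownership is about to change.
 */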
 576int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 577{
 578	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 579	struct gfs2_quota_data **qd;
 580	int error;
 581
 582	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 583		return 0;
 584
 585	error = gfs2_qa_get(ip);
 586	if (error)
 587		return error;
 588
 589	qd = ip->i_qadata->qa_qd;
 590
 591	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 592	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 593		error = -EIO;
 
 594		goto out;
 595	}
 596
 597	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 598	if (error)
 599		goto out_unhold;
 600	ip->i_qadata->qa_qd_num++;
 601	qd++;
 602
 603	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 604	if (error)
 605		goto out_unhold;
 606	ip->i_qadata->qa_qd_num++;
 607	qd++;
 608
 609	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 610	    !uid_eq(uid, ip->i_inode.i_uid)) {
 611		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 612		if (error)
 613			goto out_unhold;
 614		ip->i_qadata->qa_qd_num++;
 615		qd++;
 616	}
 617
 618	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 619	    !gid_eq(gid, ip->i_inode.i_gid)) {
 620		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 621		if (error)
 622			goto out_unhold;
 623		ip->i_qadata->qa_qd_num++;
 624		qd++;
 625	}
 626
 627out_unhold:
 628	if (error)
 629		gfs2_quota_unhold(ip);
 630out:
 631	return error;
 632}
 633
 634void gfs2_quota_unhold(struct gfs2_inode *ip)
 635{
 636	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 637	u32 x;
 638
 639	if (ip->i_qadata == NULL)
 640		return;
 641
 642	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 643
 644	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 645		qdsb_put(ip->i_qadata->qa_qd[x]);
 646		ip->i_qadata->qa_qd[x] = NULL;
 647	}
 648	ip->i_qadata->qa_qd_num = 0;
 649	gfs2_qa_put(ip);
 650}
 651
 652static int sort_qd(const void *a, const void *b)
 653{
 654	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 655	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 656
 657	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 658		return -1;
 659	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 660		return 1;
 661	return 0;
 662}
 663
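/*
 * Fold a local change of @change blocks into this node's quota_change file
 * and into qd->qd_change.  When the accumulated change returns to zero,
 * the slot and the reference taken for the change are dropped again.
 */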
 664static void do_qc(struct gfs2_quota_data *qd, s64 change)
 665{
 666	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 667	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 668	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 669	s64 x;
 670
 671	mutex_lock(&sdp->sd_quota_mutex);
 672	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 673
 674	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 675		qc->qc_change = 0;
 676		qc->qc_flags = 0;
 677		if (qd->qd_id.type == USRQUOTA)
 678			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 679		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 680	}
 681
 682	x = be64_to_cpu(qc->qc_change) + change;
 683	qc->qc_change = cpu_to_be64(x);
 684
 685	spin_lock(&qd_lock);
 686	qd->qd_change = x;
 687	spin_unlock(&qd_lock);
 688
 689	if (!x) {
 690		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 691		clear_bit(QDF_CHANGE, &qd->qd_flags);
 692		qc->qc_flags = 0;
 693		qc->qc_id = 0;
 694		slot_put(qd);
 695		qd_put(qd);
 696	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 697		qd_hold(qd);
 698		slot_hold(qd);
 699	}
 700
 701	if (change < 0) /* Reset quiet flag if we freed some blocks */
 702		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 703	mutex_unlock(&sdp->sd_quota_mutex);
 704}
 705
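/*
 * Copy @bytes of quota data into one page of the quota file, mapping and
 * reading the underlying buffers first so the update flows through the
 * page cache and the journal (jdata) or ordered-write machinery.
 */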
 706static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 707				  unsigned off, void *buf, unsigned bytes)
 708{
 
 709	struct inode *inode = &ip->i_inode;
 710	struct gfs2_sbd *sdp = GFS2_SB(inode);
 711	struct address_space *mapping = inode->i_mapping;
 712	struct page *page;
 713	struct buffer_head *bh;
 714	void *kaddr;
 715	u64 blk;
 716	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 717	unsigned to_write = bytes, pg_off = off;
 718	int done = 0;
 719
 720	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 721	boff = off % bsize;
 722
 723	page = find_or_create_page(mapping, index, GFP_NOFS);
 724	if (!page)
 725		return -ENOMEM;
 726	if (!page_has_buffers(page))
 727		create_empty_buffers(page, bsize, 0);
 
 728
 729	bh = page_buffers(page);
 730	while (!done) {
 731		/* Find the beginning block within the page */
 732		if (pg_off >= ((bnum * bsize) + bsize)) {
 733			bh = bh->b_this_page;
 734			bnum++;
 735			blk++;
 736			continue;
 737		}
 738		if (!buffer_mapped(bh)) {
 739			gfs2_block_map(inode, blk, bh, 1);
 740			if (!buffer_mapped(bh))
 741				goto unlock_out;
 742			/* If it's a newly allocated disk block, zero it */
 743			if (buffer_new(bh))
 744				zero_user(page, bnum * bsize, bh->b_size);
 
 745		}
 746		if (PageUptodate(page))
 747			set_buffer_uptodate(bh);
 748		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 749			goto unlock_out;
 750		if (gfs2_is_jdata(ip))
 751			gfs2_trans_add_data(ip->i_gl, bh);
 752		else
 753			gfs2_ordered_add_inode(ip);
 754
 755		/* If we need to write to the next block as well */
 756		if (to_write > (bsize - boff)) {
 757			pg_off += (bsize - boff);
 758			to_write -= (bsize - boff);
 759			boff = pg_off % bsize;
 760			continue;
 761		}
 762		done = 1;
 763	}
 764
 765	/* Write to the page, now that we have setup the buffer(s) */
 766	kaddr = kmap_atomic(page);
 767	memcpy(kaddr + off, buf, bytes);
 768	flush_dcache_page(page);
 769	kunmap_atomic(kaddr);
 770	unlock_page(page);
 771	put_page(page);
 772
 773	return 0;
 774
 775unlock_out:
 776	unlock_page(page);
 777	put_page(page);
 778	return -EIO;
 779}
 780
 781static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 782				 loff_t loc)
 783{
 784	unsigned long pg_beg;
 785	unsigned pg_off, nbytes, overflow = 0;
 786	int pg_oflow = 0, error;
 787	void *ptr;
 788
 789	nbytes = sizeof(struct gfs2_quota);
 790
 791	pg_beg = loc >> PAGE_SHIFT;
 792	pg_off = offset_in_page(loc);
 793
 794	/* If the quota straddles a page boundary, split the write in two */
 795	if ((pg_off + nbytes) > PAGE_SIZE) {
 796		pg_oflow = 1;
 797		overflow = (pg_off + nbytes) - PAGE_SIZE;
 798	}
 799
 800	ptr = qp;
 801	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
 802				       nbytes - overflow);
 803	/* If there's an overflow, write the remaining bytes to the next page */
 804	if (!error && pg_oflow)
 805		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
 806					       ptr + nbytes - overflow,
 807					       overflow);
 808	return error;
 809}
 810
 811/**
 812 * gfs2_adjust_quota - adjust record of current block usage
 813 * @ip: The quota inode
 814 * @loc: Offset of the entry in the quota file
 815 * @change: The amount of usage change to record
 816 * @qd: The quota data
 817 * @fdq: The updated limits to record
 818 *
 819 * This function was mostly borrowed from gfs2_block_truncate_page which was
 820 * in turn mostly borrowed from ext3
 821 *
 822 * Returns: 0 or -ve on error
 823 */
 824
 825static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 826			     s64 change, struct gfs2_quota_data *qd,
 827			     struct qc_dqblk *fdq)
 828{
 
 829	struct inode *inode = &ip->i_inode;
 830	struct gfs2_sbd *sdp = GFS2_SB(inode);
 831	struct gfs2_quota q;
 832	int err;
 833	u64 size;
 834
 835	if (gfs2_is_stuffed(ip)) {
 836		err = gfs2_unstuff_dinode(ip);
 837		if (err)
 838			return err;
 839	}
 840
 841	memset(&q, 0, sizeof(struct gfs2_quota));
 842	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 843	if (err < 0)
 844		return err;
 845
 846	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 847	err = -EIO;
 848	be64_add_cpu(&q.qu_value, change);
 849	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 850		q.qu_value = 0; /* Never go negative on quota usage */
 851	qd->qd_qb.qb_value = q.qu_value;
 852	if (fdq) {
 853		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 854			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 855			qd->qd_qb.qb_warn = q.qu_warn;
 856		}
 857		if (fdq->d_fieldmask & QC_SPC_HARD) {
 858			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 859			qd->qd_qb.qb_limit = q.qu_limit;
 860		}
 861		if (fdq->d_fieldmask & QC_SPACE) {
 862			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 863			qd->qd_qb.qb_value = q.qu_value;
 864		}
 865	}
 866
 867	err = gfs2_write_disk_quota(ip, &q, loc);
 868	if (!err) {
 869		size = loc + sizeof(struct gfs2_quota);
 870		if (size > inode->i_size)
 871			i_size_write(inode, size);
 872		inode->i_mtime = inode->i_atime = current_time(inode);
 873		mark_inode_dirty(inode);
 874		set_bit(QDF_REFRESH, &qd->qd_flags);
 875	}
 876
 877	return err;
 878}
 879
 880static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 881{
 882	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 883	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 884	struct gfs2_alloc_parms ap = { .aflags = 0, };
 885	unsigned int data_blocks, ind_blocks;
 886	struct gfs2_holder *ghs, i_gh;
 887	unsigned int qx, x;
 888	struct gfs2_quota_data *qd;
 889	unsigned reserved;
 890	loff_t offset;
 891	unsigned int nalloc = 0, blocks;
 892	int error;
 893
 894	error = gfs2_qa_get(ip);
 895	if (error)
 896		return error;
 897
 898	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 899			      &data_blocks, &ind_blocks);
 900
 901	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 902	if (!ghs) {
 903		error = -ENOMEM;
 904		goto out;
 905	}
 906
 907	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 908	inode_lock(&ip->i_inode);
 909	for (qx = 0; qx < num_qd; qx++) {
 910		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 911					   GL_NOCACHE, &ghs[qx]);
 912		if (error)
 913			goto out_dq;
 914	}
 915
 916	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 917	if (error)
 918		goto out_dq;
 919
 920	for (x = 0; x < num_qd; x++) {
 921		offset = qd2offset(qda[x]);
 922		if (gfs2_write_alloc_required(ip, offset,
 923					      sizeof(struct gfs2_quota)))
 924			nalloc++;
 925	}
 926
 927	/* 
 928	 * 1 blk for unstuffing inode if stuffed. We add this extra
 929	 * block to the reservation unconditionally. If the inode
 930	 * doesn't need unstuffing, the block will be released to the 
 931	 * rgrp since it won't be allocated during the transaction
 932	 */
 933	/* +3 in the end for unstuffing block, inode size update block
 934	 * and another block in case quota straddles page boundary and 
 935	 * two blocks need to be updated instead of 1 */
 936	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 937
 938	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 939	ap.target = reserved;
 940	error = gfs2_inplace_reserve(ip, &ap);
 941	if (error)
 942		goto out_alloc;
 943
 944	if (nalloc)
 945		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 946
 947	error = gfs2_trans_begin(sdp, blocks, 0);
 948	if (error)
 949		goto out_ipres;
 950
 951	for (x = 0; x < num_qd; x++) {
 952		qd = qda[x];
 953		offset = qd2offset(qd);
 954		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 
 955		if (error)
 956			goto out_end_trans;
 957
 958		do_qc(qd, -qd->qd_change_sync);
 959		set_bit(QDF_REFRESH, &qd->qd_flags);
 960	}
 961
 962	error = 0;
 963
 964out_end_trans:
 965	gfs2_trans_end(sdp);
 966out_ipres:
 967	gfs2_inplace_release(ip);
 968out_alloc:
 969	gfs2_glock_dq_uninit(&i_gh);
 970out_dq:
 971	while (qx--)
 972		gfs2_glock_dq_uninit(&ghs[qx]);
 973	inode_unlock(&ip->i_inode);
 974	kfree(ghs);
 975	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
 976		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
 977out:
 978	gfs2_qa_put(ip);
 
 
 979	return error;
 980}
 981
 982static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 983{
 984	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 985	struct gfs2_quota q;
 986	struct gfs2_quota_lvb *qlvb;
 987	loff_t pos;
 988	int error;
 989
 990	memset(&q, 0, sizeof(struct gfs2_quota));
 991	pos = qd2offset(qd);
 992	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 993	if (error < 0)
 994		return error;
 995
 996	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 997	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 998	qlvb->__pad = 0;
 999	qlvb->qb_limit = q.qu_limit;
1000	qlvb->qb_warn = q.qu_warn;
1001	qlvb->qb_value = q.qu_value;
1002	qd->qd_qb = *qlvb;
1003
1004	return 0;
1005}
1006
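/*
 * Acquire the quota glock for @qd in shared mode.  If the cached LVB is
 * stale or a refresh is forced, the lock is retaken exclusively, the
 * on-disk quota is re-read into the LVB, and the shared acquisition is
 * restarted.
 */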
1007static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1008		    struct gfs2_holder *q_gh)
1009{
1010	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1011	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1012	struct gfs2_holder i_gh;
1013	int error;
1014
 
1015restart:
1016	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1017	if (error)
1018		return error;
1019
1020	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1021		force_refresh = FORCE;
1022
1023	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1024
1025	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1026		gfs2_glock_dq_uninit(q_gh);
1027		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1028					   GL_NOCACHE, q_gh);
1029		if (error)
1030			return error;
1031
1032		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1033		if (error)
1034			goto fail;
1035
1036		error = update_qd(sdp, qd);
1037		if (error)
1038			goto fail_gunlock;
1039
1040		gfs2_glock_dq_uninit(&i_gh);
1041		gfs2_glock_dq_uninit(q_gh);
1042		force_refresh = 0;
1043		goto restart;
1044	}
1045
1046	return 0;
1047
1048fail_gunlock:
1049	gfs2_glock_dq_uninit(&i_gh);
1050fail:
1051	gfs2_glock_dq_uninit(q_gh);
1052	return error;
1053}
1054
1055int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1056{
1057	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1058	struct gfs2_quota_data *qd;
1059	u32 x;
1060	int error = 0;
1061
1062	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1063		return 0;
1064
1065	error = gfs2_quota_hold(ip, uid, gid);
1066	if (error)
1067		return error;
1068
1069	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1070	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1071
1072	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1073		qd = ip->i_qadata->qa_qd[x];
1074		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1075		if (error)
1076			break;
1077	}
1078
1079	if (!error)
1080		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1081	else {
1082		while (x--)
1083			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1084		gfs2_quota_unhold(ip);
1085	}
1086
1087	return error;
1088}
1089
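/*
 * Decide whether this node's unsynced change for @qd is large enough that
 * the quota file should be updated now: the local change is scaled by the
 * number of journals and by quota_scale (num/den), and a sync is requested
 * if the projected usage would reach the limit.
 */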
1090static int need_sync(struct gfs2_quota_data *qd)
1091{
1092	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1093	struct gfs2_tune *gt = &sdp->sd_tune;
1094	s64 value;
1095	unsigned int num, den;
1096	int do_sync = 1;
1097
1098	if (!qd->qd_qb.qb_limit)
1099		return 0;
1100
1101	spin_lock(&qd_lock);
1102	value = qd->qd_change;
1103	spin_unlock(&qd_lock);
1104
1105	spin_lock(&gt->gt_spin);
1106	num = gt->gt_quota_scale_num;
1107	den = gt->gt_quota_scale_den;
1108	spin_unlock(&gt->gt_spin);
1109
1110	if (value < 0)
1111		do_sync = 0;
1112	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1113		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1114		do_sync = 0;
1115	else {
1116		value *= gfs2_jindex_size(sdp) * num;
1117		value = div_s64(value, den);
1118		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1119		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1120			do_sync = 0;
1121	}
1122
1123	return do_sync;
1124}
1125
1126void gfs2_quota_unlock(struct gfs2_inode *ip)
1127{
1128	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1129	struct gfs2_quota_data *qda[4];
1130	unsigned int count = 0;
1131	u32 x;
1132	int found;
1133
1134	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1135		return;
1136
1137	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1138		struct gfs2_quota_data *qd;
1139		int sync;
1140
1141		qd = ip->i_qadata->qa_qd[x];
1142		sync = need_sync(qd);
1143
1144		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1145		if (!sync)
1146			continue;
1147
1148		spin_lock(&qd_lock);
1149		found = qd_check_sync(sdp, qd, NULL);
1150		spin_unlock(&qd_lock);
1151
1152		if (!found)
1153			continue;
1154
1155		gfs2_assert_warn(sdp, qd->qd_change_sync);
1156		if (bh_get(qd)) {
1157			clear_bit(QDF_LOCKED, &qd->qd_flags);
1158			slot_put(qd);
1159			qd_put(qd);
1160			continue;
1161		}
1162
1163		qda[count++] = qd;
1164	}
1165
1166	if (count) {
1167		do_sync(count, qda);
1168		for (x = 0; x < count; x++)
1169			qd_unlock(qda[x]);
1170	}
1171
1172	gfs2_quota_unhold(ip);
1173}
1174
1175#define MAX_LINE 256
1176
1177static int print_message(struct gfs2_quota_data *qd, char *type)
1178{
1179	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
1180
1181	fs_info(sdp, "quota %s for %s %u\n",
1182		type,
1183		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1184		from_kqid(&init_user_ns, qd->qd_id));
1185
1186	return 0;
1187}
1188
1189/**
1190 * gfs2_quota_check - check if allocating new blocks will exceed quota
1191 * @ip:  The inode for which this check is being performed
1192 * @uid: The uid to check against
1193 * @gid: The gid to check against
1194 * @ap:  The allocation parameters. ap->target contains the requested
1195 *       blocks. ap->min_target, if set, contains the minimum blks
1196 *       requested.
1197 *
1198 * Returns: 0 on success.
1199 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1200 *                  quota must allow at least min_req blks for success and
1201 *                  ap->allowed is set to the number of blocks allowed
1202 *
1203 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1204 *                  of blocks available.
1205 */
1206int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1207		     struct gfs2_alloc_parms *ap)
1208{
1209	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1210	struct gfs2_quota_data *qd;
1211	s64 value, warn, limit;
1212	u32 x;
1213	int error = 0;
1214
1215	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1216	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1217		return 0;
1218
1219	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1220		qd = ip->i_qadata->qa_qd[x];
1221
1222		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1223		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1224			continue;
1225
1226		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1227		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1228		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1229		spin_lock(&qd_lock);
1230		value += qd->qd_change;
1231		spin_unlock(&qd_lock);
1232
1233		if (limit > 0 && (limit - value) < ap->allowed)
1234			ap->allowed = limit - value;
1235		/* If we can't meet the target */
1236		if (limit && limit < (value + (s64)ap->target)) {
1237			/* If no min_target specified or we don't meet
1238			 * min_target, return -EDQUOT */
1239			if (!ap->min_target || ap->min_target > ap->allowed) {
1240				if (!test_and_set_bit(QDF_QMSG_QUIET,
1241						      &qd->qd_flags)) {
1242					print_message(qd, "exceeded");
1243					quota_send_warning(qd->qd_id,
1244							   sdp->sd_vfs->s_dev,
1245							   QUOTA_NL_BHARDWARN);
1246				}
1247				error = -EDQUOT;
1248				break;
1249			}
1250		} else if (warn && warn < value &&
1251			   time_after_eq(jiffies, qd->qd_last_warn +
1252					 gfs2_tune_get(sdp, gt_quota_warn_period)
1253					 * HZ)) {
1254			quota_send_warning(qd->qd_id,
1255					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1256			error = print_message(qd, "warning");
 
1257			qd->qd_last_warn = jiffies;
1258		}
1259	}
1260	return error;
1261}
1262
1263void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1264		       kuid_t uid, kgid_t gid)
1265{
1266	struct gfs2_quota_data *qd;
1267	u32 x;
1268	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1269
1270	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
1271	    gfs2_assert_warn(sdp, change))
1272		return;
1273	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1274		return;
1275
1276	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1277				 ip->i_qadata->qa_ref > 0))
1278		return;
1279	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1280		qd = ip->i_qadata->qa_qd[x];
1281
1282		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1283		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1284			do_qc(qd, change);
1285		}
1286	}
1287}
1288
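/*
 * Sync all pending local quota changes to the quota file, in batches of at
 * most max_qd entries per do_sync() call.
 */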
1289int gfs2_quota_sync(struct super_block *sb, int type)
1290{
1291	struct gfs2_sbd *sdp = sb->s_fs_info;
1292	struct gfs2_quota_data **qda;
1293	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1294	unsigned int num_qd;
1295	unsigned int x;
1296	int error = 0;
1297
1298	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1299	if (!qda)
1300		return -ENOMEM;
1301
1302	mutex_lock(&sdp->sd_quota_sync_mutex);
1303	sdp->sd_quota_sync_gen++;
1304
1305	do {
1306		num_qd = 0;
1307
1308		for (;;) {
1309			error = qd_fish(sdp, qda + num_qd);
1310			if (error || !qda[num_qd])
1311				break;
1312			if (++num_qd == max_qd)
1313				break;
1314		}
1315
1316		if (num_qd) {
1317			if (!error)
1318				error = do_sync(num_qd, qda);
1319			if (!error)
1320				for (x = 0; x < num_qd; x++)
1321					qda[x]->qd_sync_gen =
1322						sdp->sd_quota_sync_gen;
1323
1324			for (x = 0; x < num_qd; x++)
1325				qd_unlock(qda[x]);
1326		}
1327	} while (!error && num_qd == max_qd);
1328
1329	mutex_unlock(&sdp->sd_quota_sync_mutex);
1330	kfree(qda);
1331
1332	return error;
1333}
1334
1335int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1336{
1337	struct gfs2_quota_data *qd;
1338	struct gfs2_holder q_gh;
1339	int error;
1340
1341	error = qd_get(sdp, qid, &qd);
1342	if (error)
1343		return error;
1344
1345	error = do_glock(qd, FORCE, &q_gh);
1346	if (!error)
1347		gfs2_glock_dq_uninit(&q_gh);
1348
1349	qd_put(qd);
1350	return error;
1351}
1352
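/*
 * At mount time, scan this node's quota_change file and recreate an
 * in-core gfs2_quota_data for every slot that still holds a non-zero
 * change, so that the change can be synced later.
 */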
1353int gfs2_quota_init(struct gfs2_sbd *sdp)
1354{
1355	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1356	u64 size = i_size_read(sdp->sd_qc_inode);
1357	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1358	unsigned int x, slot = 0;
1359	unsigned int found = 0;
1360	unsigned int hash;
1361	unsigned int bm_size;
1362	u64 dblock;
1363	u32 extlen = 0;
1364	int error;
1365
1366	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1367		return -EIO;
1368
1369	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1370	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1371	bm_size *= sizeof(unsigned long);
1372	error = -ENOMEM;
1373	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1374	if (sdp->sd_quota_bitmap == NULL)
1375		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1376						 __GFP_ZERO);
1377	if (!sdp->sd_quota_bitmap)
1378		return error;
1379
1380	for (x = 0; x < blocks; x++) {
1381		struct buffer_head *bh;
1382		const struct gfs2_quota_change *qc;
1383		unsigned int y;
1384
1385		if (!extlen) {
1386			extlen = 32;
1387			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1388			if (error)
1389				goto fail;
1390		}
1391		error = -EIO;
1392		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1393		if (!bh)
1394			goto fail;
1395		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1396			brelse(bh);
1397			goto fail;
1398		}
1399
1400		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1401		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1402		     y++, slot++) {
1403			struct gfs2_quota_data *qd;
1404			s64 qc_change = be64_to_cpu(qc->qc_change);
1405			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1406			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1407						USRQUOTA : GRPQUOTA;
1408			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1409						      be32_to_cpu(qc->qc_id));
1410			qc++;
1411			if (!qc_change)
1412				continue;
1413
1414			hash = gfs2_qd_hash(sdp, qc_id);
1415			qd = qd_alloc(hash, sdp, qc_id);
1416			if (qd == NULL) {
1417				brelse(bh);
1418				goto fail;
1419			}
1420
1421			set_bit(QDF_CHANGE, &qd->qd_flags);
1422			qd->qd_change = qc_change;
1423			qd->qd_slot = slot;
1424			qd->qd_slot_count = 1;
1425
1426			spin_lock(&qd_lock);
1427			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1428			list_add(&qd->qd_list, &sdp->sd_quota_list);
1429			atomic_inc(&sdp->sd_quota_count);
1430			spin_unlock(&qd_lock);
1431
1432			spin_lock_bucket(hash);
1433			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1434			spin_unlock_bucket(hash);
1435
1436			found++;
1437		}
1438
1439		brelse(bh);
1440		dblock++;
1441		extlen--;
1442	}
1443
1444	if (found)
1445		fs_info(sdp, "found %u quota changes\n", found);
1446
1447	return 0;
1448
1449fail:
1450	gfs2_quota_cleanup(sdp);
1451	return error;
1452}
1453
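/*
 * Drop every cached gfs2_quota_data for this filesystem (used on the
 * gfs2_quota_init() error path and when the filesystem is torn down).
 */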
1454void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1455{
1456	struct list_head *head = &sdp->sd_quota_list;
1457	struct gfs2_quota_data *qd;
 
 
1458
1459	spin_lock(&qd_lock);
1460	while (!list_empty(head)) {
1461		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
1462
1463		list_del(&qd->qd_list);
1464
1465		/* Also remove if this qd exists in the reclaim list */
1466		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1467		atomic_dec(&sdp->sd_quota_count);
1468		spin_unlock(&qd_lock);
1469
1470		spin_lock_bucket(qd->qd_hash);
1471		hlist_bl_del_rcu(&qd->qd_hlist);
1472		spin_unlock_bucket(qd->qd_hash);
1473
1474		gfs2_assert_warn(sdp, !qd->qd_change);
1475		gfs2_assert_warn(sdp, !qd->qd_slot_count);
1476		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1477
1478		gfs2_glock_put(qd->qd_gl);
1479		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1480
1481		spin_lock(&qd_lock);
 
1482	}
1483	spin_unlock(&qd_lock);
1484
1485	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1486
1487	kvfree(sdp->sd_quota_bitmap);
1488	sdp->sd_quota_bitmap = NULL;
1489}
1490
1491static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1492{
1493	if (error == 0 || error == -EROFS)
1494		return;
1495	if (!gfs2_withdrawn(sdp)) {
1496		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1497			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1498		wake_up(&sdp->sd_logd_waitq);
1499	}
1500}
1501
1502static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1503			       int (*fxn)(struct super_block *sb, int type),
1504			       unsigned long t, unsigned long *timeo,
1505			       unsigned int *new_timeo)
1506{
1507	if (t >= *timeo) {
1508		int error = fxn(sdp->sd_vfs, 0);
1509		quotad_error(sdp, msg, error);
1510		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1511	} else {
1512		*timeo -= t;
1513	}
1514}
1515
1516void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1517	if (!sdp->sd_statfs_force_sync) {
1518		sdp->sd_statfs_force_sync = 1;
1519		wake_up(&sdp->sd_quota_wait);
1520	}
1521}
1522
1523
1524/**
1525 * gfs2_quotad - Write cached quota changes into the quota file
1526 * @data: Pointer to GFS2 superblock
1527 *
1528 */
1529
1530int gfs2_quotad(void *data)
1531{
1532	struct gfs2_sbd *sdp = data;
1533	struct gfs2_tune *tune = &sdp->sd_tune;
1534	unsigned long statfs_timeo = 0;
1535	unsigned long quotad_timeo = 0;
1536	unsigned long t = 0;
1537	DEFINE_WAIT(wait);
1538
 
1539	while (!kthread_should_stop()) {
 
 
1540
1541		if (gfs2_withdrawn(sdp))
1542			goto bypass;
1543		/* Update the master statfs file */
1544		if (sdp->sd_statfs_force_sync) {
1545			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1546			quotad_error(sdp, "statfs", error);
1547			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1548		}
1549		else
1550			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1551				   	   &statfs_timeo,
1552					   &tune->gt_statfs_quantum);
1553
1554		/* Update quota file */
1555		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1556				   &quotad_timeo, &tune->gt_quota_quantum);
1557
1558		try_to_freeze();
1559
1560bypass:
1561		t = min(quotad_timeo, statfs_timeo);
1562
1563		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1564		if (!sdp->sd_statfs_force_sync)
1565			t -= schedule_timeout(t);
1566		else
1567			t = 0;
1568		finish_wait(&sdp->sd_quota_wait, &wait);
1569	}
1570
1571	return 0;
1572}
1573
1574static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1575{
1576	struct gfs2_sbd *sdp = sb->s_fs_info;
1577
1578	memset(state, 0, sizeof(*state));
1579
1580	switch (sdp->sd_args.ar_quota) {
 
 
1581	case GFS2_QUOTA_ON:
1582		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1583		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1584		fallthrough;
1585	case GFS2_QUOTA_ACCOUNT:
1586		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1587						  QCI_SYSFILE;
1588		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1589						  QCI_SYSFILE;
1590		break;
1591	case GFS2_QUOTA_OFF:
1592		break;
1593	}
1594	if (sdp->sd_quota_inode) {
1595		state->s_state[USRQUOTA].ino =
1596					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1597		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1598	}
1599	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1600	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1601	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1602	return 0;
1603}
1604
1605static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1606			  struct qc_dqblk *fdq)
1607{
1608	struct gfs2_sbd *sdp = sb->s_fs_info;
1609	struct gfs2_quota_lvb *qlvb;
1610	struct gfs2_quota_data *qd;
1611	struct gfs2_holder q_gh;
1612	int error;
1613
1614	memset(fdq, 0, sizeof(*fdq));
1615
1616	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1617		return -ESRCH; /* Crazy XFS error code */
1618
1619	if ((qid.type != USRQUOTA) &&
1620	    (qid.type != GRPQUOTA))
1621		return -EINVAL;
1622
1623	error = qd_get(sdp, qid, &qd);
1624	if (error)
1625		return error;
1626	error = do_glock(qd, FORCE, &q_gh);
1627	if (error)
1628		goto out;
1629
1630	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1631	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1632	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1633	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1634
1635	gfs2_glock_dq_uninit(&q_gh);
1636out:
1637	qd_put(qd);
1638	return error;
1639}
1640
1641/* GFS2 only supports a subset of the XFS fields */
1642#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1643
1644static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1645			  struct qc_dqblk *fdq)
1646{
1647	struct gfs2_sbd *sdp = sb->s_fs_info;
1648	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1649	struct gfs2_quota_data *qd;
1650	struct gfs2_holder q_gh, i_gh;
1651	unsigned int data_blocks, ind_blocks;
1652	unsigned int blocks = 0;
1653	int alloc_required;
1654	loff_t offset;
1655	int error;
1656
1657	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1658		return -ESRCH; /* Crazy XFS error code */
1659
1660	if ((qid.type != USRQUOTA) &&
1661	    (qid.type != GRPQUOTA))
1662		return -EINVAL;
1663
1664	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1665		return -EINVAL;
1666
1667	error = qd_get(sdp, qid, &qd);
1668	if (error)
1669		return error;
1670
1671	error = gfs2_qa_get(ip);
1672	if (error)
1673		goto out_put;
1674
1675	inode_lock(&ip->i_inode);
1676	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1677	if (error)
1678		goto out_unlockput;
1679	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1680	if (error)
1681		goto out_q;
1682
1683	/* Check for existing entry, if none then alloc new blocks */
1684	error = update_qd(sdp, qd);
1685	if (error)
1686		goto out_i;
1687
1688	/* If nothing has changed, this is a no-op */
1689	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1690	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1691		fdq->d_fieldmask ^= QC_SPC_SOFT;
1692
1693	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1694	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1695		fdq->d_fieldmask ^= QC_SPC_HARD;
1696
1697	if ((fdq->d_fieldmask & QC_SPACE) &&
1698	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1699		fdq->d_fieldmask ^= QC_SPACE;
1700
1701	if (fdq->d_fieldmask == 0)
1702		goto out_i;
1703
1704	offset = qd2offset(qd);
1705	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1706	if (gfs2_is_stuffed(ip))
1707		alloc_required = 1;
1708	if (alloc_required) {
1709		struct gfs2_alloc_parms ap = { .aflags = 0, };
1710		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1711				       &data_blocks, &ind_blocks);
1712		blocks = 1 + data_blocks + ind_blocks;
1713		ap.target = blocks;
1714		error = gfs2_inplace_reserve(ip, &ap);
1715		if (error)
1716			goto out_i;
1717		blocks += gfs2_rg_blocks(ip, blocks);
1718	}
1719
1720	/* Some quotas span block boundaries and can update two blocks,
1721	   adding an extra block to the transaction to handle such quotas */
1722	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1723	if (error)
1724		goto out_release;
1725
1726	/* Apply changes */
1727	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1728	if (!error)
1729		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1730
1731	gfs2_trans_end(sdp);
1732out_release:
1733	if (alloc_required)
1734		gfs2_inplace_release(ip);
1735out_i:
1736	gfs2_glock_dq_uninit(&i_gh);
1737out_q:
1738	gfs2_glock_dq_uninit(&q_gh);
1739out_unlockput:
1740	gfs2_qa_put(ip);
1741	inode_unlock(&ip->i_inode);
1742out_put:
1743	qd_put(qd);
1744	return error;
1745}
1746
1747const struct quotactl_ops gfs2_quotactl_ops = {
1748	.quota_sync     = gfs2_quota_sync,
1749	.get_state	= gfs2_quota_get_state,
1750	.get_dqblk	= gfs2_get_dqblk,
1751	.set_dqblk	= gfs2_set_dqblk,
1752};
1753
1754void __init gfs2_quota_hash_init(void)
1755{
1756	unsigned i;
1757
1758	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1759		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1760}
fs/gfs2/quota.c (v6.8)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
   14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
   18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
  21 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
   26 * (with infinitely many nodes, each with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78#define QC_CHANGE 0
  79#define QC_SYNC 1
  80
  81/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  82/*                     -> sd_bitmap_lock                              */
  83static DEFINE_SPINLOCK(qd_lock);
  84struct list_lru gfs2_qd_lru;
  85
  86static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  87
  88static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  89				 const struct kqid qid)
  90{
  91	unsigned int h;
  92
  93	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  94	h = jhash(&qid, sizeof(struct kqid), h);
  95
  96	return h & GFS2_QD_HASH_MASK;
  97}
  98
  99static inline void spin_lock_bucket(unsigned int hash)
 100{
 101        hlist_bl_lock(&qd_hash_table[hash]);
 102}
 103
 104static inline void spin_unlock_bucket(unsigned int hash)
 105{
 106        hlist_bl_unlock(&qd_hash_table[hash]);
 107}
 108
 109static void gfs2_qd_dealloc(struct rcu_head *rcu)
 110{
 111	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 112	struct gfs2_sbd *sdp = qd->qd_sbd;
 113
 114	kmem_cache_free(gfs2_quotad_cachep, qd);
 115	if (atomic_dec_and_test(&sdp->sd_quota_count))
 116		wake_up(&sdp->sd_kill_wait);
 117}
 118
 119static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
 120{
 121	struct gfs2_sbd *sdp = qd->qd_sbd;
 122
 123	spin_lock(&qd_lock);
 124	list_del(&qd->qd_list);
 125	spin_unlock(&qd_lock);
 
 126
 127	spin_lock_bucket(qd->qd_hash);
 128	hlist_bl_del_rcu(&qd->qd_hlist);
 129	spin_unlock_bucket(qd->qd_hash);
 130
 131	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
 132		gfs2_assert_warn(sdp, !qd->qd_change);
 133		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
 134		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 135	}
 136
 137	gfs2_glock_put(qd->qd_gl);
 138	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 139}
 140
 141static void gfs2_qd_list_dispose(struct list_head *list)
 142{
 143	struct gfs2_quota_data *qd;
 144
 145	while (!list_empty(list)) {
 146		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 147		list_del(&qd->qd_lru);
 148
 149		gfs2_qd_dispose(qd);
 
 150	}
 151}
 152
 153
 154static enum lru_status gfs2_qd_isolate(struct list_head *item,
 155		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 156{
 157	struct list_head *dispose = arg;
 158	struct gfs2_quota_data *qd =
 159		list_entry(item, struct gfs2_quota_data, qd_lru);
 160	enum lru_status status;
 161
 162	if (!spin_trylock(&qd->qd_lockref.lock))
 163		return LRU_SKIP;
 164
 165	status = LRU_SKIP;
 166	if (qd->qd_lockref.count == 0) {
 167		lockref_mark_dead(&qd->qd_lockref);
 168		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 169		status = LRU_REMOVED;
 170	}
 171
 172	spin_unlock(&qd->qd_lockref.lock);
 173	return status;
 174}
 175
 176static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 177					 struct shrink_control *sc)
 178{
 179	LIST_HEAD(dispose);
 180	unsigned long freed;
 181
 182	if (!(sc->gfp_mask & __GFP_FS))
 183		return SHRINK_STOP;
 184
 185	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 186				     gfs2_qd_isolate, &dispose);
 187
 188	gfs2_qd_list_dispose(&dispose);
 189
 190	return freed;
 191}
 192
 193static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 194					  struct shrink_control *sc)
 195{
 196	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 197}
 198
 199static struct shrinker *gfs2_qd_shrinker;
 200
 201int __init gfs2_qd_shrinker_init(void)
 202{
 203	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
 204	if (!gfs2_qd_shrinker)
 205		return -ENOMEM;
 206
 207	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
 208	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
 209
 210	shrinker_register(gfs2_qd_shrinker);
 211
 212	return 0;
 213}
 214
 215void gfs2_qd_shrinker_exit(void)
 216{
 217	shrinker_free(gfs2_qd_shrinker);
 218}
 219
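/*
 * User and group quotas are interleaved in the quota file: user ID n
 * lives at index 2n and group ID n at index 2n + 1, each entry being
 * sizeof(struct gfs2_quota) bytes.  For example, the record for user
 * ID 1000 starts at byte offset 2000 * sizeof(struct gfs2_quota).
 */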
 220static u64 qd2index(struct gfs2_quota_data *qd)
 221{
 222	struct kqid qid = qd->qd_id;
 223	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 224		((qid.type == USRQUOTA) ? 0 : 1);
 225}
 226
 227static u64 qd2offset(struct gfs2_quota_data *qd)
 228{
 229	return qd2index(qd) * sizeof(struct gfs2_quota);
 230}
 231
 232static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 233{
 234	struct gfs2_quota_data *qd;
 235	int error;
 236
 237	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 238	if (!qd)
 239		return NULL;
 240
 241	qd->qd_sbd = sdp;
 242	qd->qd_lockref.count = 0;
 243	spin_lock_init(&qd->qd_lockref.lock);
 244	qd->qd_id = qid;
 245	qd->qd_slot = -1;
 246	INIT_LIST_HEAD(&qd->qd_lru);
 247	qd->qd_hash = hash;
 248
 249	error = gfs2_glock_get(sdp, qd2index(qd),
 250			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 251	if (error)
 252		goto fail;
 253
 254	return qd;
 255
 256fail:
 257	kmem_cache_free(gfs2_quotad_cachep, qd);
 258	return NULL;
 259}
 260
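/*
 * Look up a quota data object in one hash bucket under RCU.  On a hit,
 * take a reference (unless the entry is already dead) and pull the
 * object off the LRU so the shrinker cannot dispose of it while it is
 * in use.
 */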
 261static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 262						     const struct gfs2_sbd *sdp,
 263						     struct kqid qid)
 264{
 265	struct gfs2_quota_data *qd;
 266	struct hlist_bl_node *h;
 267
 268	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 269		if (!qid_eq(qd->qd_id, qid))
 270			continue;
 271		if (qd->qd_sbd != sdp)
 272			continue;
 273		if (lockref_get_not_dead(&qd->qd_lockref)) {
 274			list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
 275			return qd;
 276		}
 277	}
 278
 279	return NULL;
 280}
 281
 282
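/*
 * Find or create the quota data object for @qid.  The fast path is a
 * lock-free RCU lookup; on a miss a new object is allocated and the
 * bucket is re-checked under qd_lock to resolve races with concurrent
 * creators, freeing the spare object if somebody else got there first.
 */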
 283static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 284		  struct gfs2_quota_data **qdp)
 285{
 286	struct gfs2_quota_data *qd, *new_qd;
 287	unsigned int hash = gfs2_qd_hash(sdp, qid);
 288
 289	rcu_read_lock();
 290	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 291	rcu_read_unlock();
 292
 293	if (qd)
 294		return 0;
 295
 296	new_qd = qd_alloc(hash, sdp, qid);
 297	if (!new_qd)
 298		return -ENOMEM;
 299
 300	spin_lock(&qd_lock);
 301	spin_lock_bucket(hash);
 302	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 303	if (qd == NULL) {
 304		new_qd->qd_lockref.count++;
 305		*qdp = new_qd;
 306		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 307		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 308		atomic_inc(&sdp->sd_quota_count);
 309	}
 310	spin_unlock_bucket(hash);
 311	spin_unlock(&qd_lock);
 312
 313	if (qd) {
 314		gfs2_glock_put(new_qd->qd_gl);
 315		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 316	}
 317
 318	return 0;
 319}
 320
 321
 322static void qd_hold(struct gfs2_quota_data *qd)
 323{
 324	struct gfs2_sbd *sdp = qd->qd_sbd;
 325	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 326	lockref_get(&qd->qd_lockref);
 327}
 328
 329static void qd_put(struct gfs2_quota_data *qd)
 330{
 331	struct gfs2_sbd *sdp;
 332
 333	if (lockref_put_or_lock(&qd->qd_lockref))
 334		return;
 335
 336	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
 337	sdp = qd->qd_sbd;
 338	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
 339		lockref_mark_dead(&qd->qd_lockref);
 340		spin_unlock(&qd->qd_lockref.lock);
 341
 342		gfs2_qd_dispose(qd);
 343		return;
 344	}
 345
 346	qd->qd_lockref.count = 0;
 347	list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
 348	spin_unlock(&qd->qd_lockref.lock);
 349}
 350
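/*
 * Take a reference on the qd's slot in this node's quota change file.
 * The first reference allocates a free slot from sd_quota_bitmap; the
 * slot number determines where the change record lives (see bh_get).
 */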
 351static int slot_get(struct gfs2_quota_data *qd)
 352{
 353	struct gfs2_sbd *sdp = qd->qd_sbd;
 354	unsigned int bit;
 355	int error = 0;
 356
 357	spin_lock(&sdp->sd_bitmap_lock);
 358	if (qd->qd_slot_ref == 0) {
 359		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
 360					  sdp->sd_quota_slots);
 361		if (bit >= sdp->sd_quota_slots) {
 362			error = -ENOSPC;
 363			goto out;
 364		}
 365		set_bit(bit, sdp->sd_quota_bitmap);
 366		qd->qd_slot = bit;
 367	}
 368	qd->qd_slot_ref++;
 369out:
 370	spin_unlock(&sdp->sd_bitmap_lock);
 371	return error;
 372}
 373
 374static void slot_hold(struct gfs2_quota_data *qd)
 375{
 376	struct gfs2_sbd *sdp = qd->qd_sbd;
 377
 378	spin_lock(&sdp->sd_bitmap_lock);
 379	gfs2_assert(sdp, qd->qd_slot_ref);
 380	qd->qd_slot_ref++;
 381	spin_unlock(&sdp->sd_bitmap_lock);
 382}
 383
 384static void slot_put(struct gfs2_quota_data *qd)
 385{
 386	struct gfs2_sbd *sdp = qd->qd_sbd;
 387
 388	spin_lock(&sdp->sd_bitmap_lock);
 389	gfs2_assert(sdp, qd->qd_slot_ref);
 390	if (!--qd->qd_slot_ref) {
 391		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 392		qd->qd_slot = -1;
 393	}
 394	spin_unlock(&sdp->sd_bitmap_lock);
 395}
 396
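/*
 * Pin the buffer that backs this qd's slot in the quota change file.
 * The slot number is translated into a block and an offset within that
 * block, and qd_bh_qc is pointed at the on-disk change record.
 */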
 397static int bh_get(struct gfs2_quota_data *qd)
 398{
 399	struct gfs2_sbd *sdp = qd->qd_sbd;
 400	struct inode *inode = sdp->sd_qc_inode;
 401	struct gfs2_inode *ip = GFS2_I(inode);
 402	unsigned int block, offset;
 403	struct buffer_head *bh;
 404	struct iomap iomap = { };
 405	int error;
 406
 407	mutex_lock(&sdp->sd_quota_mutex);
 408
 409	if (qd->qd_bh_count++) {
 410		mutex_unlock(&sdp->sd_quota_mutex);
 411		return 0;
 412	}
 413
 414	block = qd->qd_slot / sdp->sd_qc_per_block;
 415	offset = qd->qd_slot % sdp->sd_qc_per_block;
 416
 417	error = gfs2_iomap_get(inode,
 418			       (loff_t)block << inode->i_blkbits,
 419			       i_blocksize(inode), &iomap);
 420	if (error)
 421		goto fail;
 422	error = -ENOENT;
 423	if (iomap.type != IOMAP_MAPPED)
 424		goto fail;
 425
 426	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 427			       DIO_WAIT, 0, &bh);
 428	if (error)
 429		goto fail;
 430	error = -EIO;
 431	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 432		goto fail_brelse;
 433
 434	qd->qd_bh = bh;
 435	qd->qd_bh_qc = (struct gfs2_quota_change *)
 436		(bh->b_data + sizeof(struct gfs2_meta_header) +
 437		 offset * sizeof(struct gfs2_quota_change));
 438
 439	mutex_unlock(&sdp->sd_quota_mutex);
 440
 441	return 0;
 442
 443fail_brelse:
 444	brelse(bh);
 445fail:
 446	qd->qd_bh_count--;
 447	mutex_unlock(&sdp->sd_quota_mutex);
 448	return error;
 449}
 450
 451static void bh_put(struct gfs2_quota_data *qd)
 452{
 453	struct gfs2_sbd *sdp = qd->qd_sbd;
 454
 455	mutex_lock(&sdp->sd_quota_mutex);
 456	gfs2_assert(sdp, qd->qd_bh_count);
 457	if (!--qd->qd_bh_count) {
 458		brelse(qd->qd_bh);
 459		qd->qd_bh = NULL;
 460		qd->qd_bh_qc = NULL;
 461	}
 462	mutex_unlock(&sdp->sd_quota_mutex);
 463}
 464
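/*
 * Decide whether @qd has a pending change that should be written to the
 * quota file.  Returns 1 with QDF_LOCKED set, the change snapshotted in
 * qd_change_sync, and extra qd and slot references taken if the caller
 * should sync it; returns 0 otherwise.
 */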
 465static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 466			 u64 *sync_gen)
 467{
 468	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 469	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 470	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
 471		return 0;
 472
 473	/*
 474	 * If qd_change is 0 it means a pending quota change was negated.
 475	 * We should not sync it, but we still have a qd reference and slot
 476	 * reference taken by gfs2_quota_change -> do_qc that need to be put.
 477	 */
 478	if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
 479		slot_put(qd);
 480		qd_put(qd);
 481		return 0;
 482	}
 483
 484	if (!lockref_get_not_dead(&qd->qd_lockref))
 485		return 0;
 486
 487	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 488	set_bit(QDF_LOCKED, &qd->qd_flags);
 489	qd->qd_change_sync = qd->qd_change;
 490	slot_hold(qd);
 491	return 1;
 492}
 493
 494static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 495{
 496	int error;
 497
 498	error = bh_get(qd);
 499	if (!error)
 500		return 0;
 501
 502	clear_bit(QDF_LOCKED, &qd->qd_flags);
 503	slot_put(qd);
 504	qd_put(qd);
 505	return error;
 506}
 507
 508static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 509{
 510	struct gfs2_quota_data *qd = NULL, *iter;
 511	int error;
 512
 513	*qdp = NULL;
 514
 515	if (sb_rdonly(sdp->sd_vfs))
 516		return 0;
 517
 518	spin_lock(&qd_lock);
 519
 520	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
 521		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
 522			qd = iter;
 523			break;
 524		}
 525	}
 526
 527	spin_unlock(&qd_lock);
 528
 529	if (qd) {
 530		error = qd_bh_get_or_undo(sdp, qd);
 531		if (error)
 532			return error;
 533		*qdp = qd;
 534	}
 535
 536	return 0;
 537}
 538
 539static void qdsb_put(struct gfs2_quota_data *qd)
 540{
 541	bh_put(qd);
 542	slot_put(qd);
 543	qd_put(qd);
 544}
 545
 546static void qd_unlock(struct gfs2_quota_data *qd)
 547{
 548	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
 549	clear_bit(QDF_LOCKED, &qd->qd_flags);
 550	qdsb_put(qd);
 551}
 552
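/*
 * Acquire all three references needed to account changes against a
 * quota ID: the quota data object itself, a quota change file slot, and
 * the buffer backing that slot.  Undone by qdsb_put.
 */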
 553static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 554		    struct gfs2_quota_data **qdp)
 555{
 556	int error;
 557
 558	error = qd_get(sdp, qid, qdp);
 559	if (error)
 560		return error;
 561
 562	error = slot_get(*qdp);
 563	if (error)
 564		goto fail;
 565
 566	error = bh_get(*qdp);
 567	if (error)
 568		goto fail_slot;
 569
 570	return 0;
 571
 572fail_slot:
 573	slot_put(*qdp);
 574fail:
 575	qd_put(*qdp);
 576	return error;
 577}
 578
 579/**
  580 * gfs2_qa_get - make sure we have a quota allocation data structure,
 581 *               if necessary
 582 * @ip: the inode for this reservation
 583 */
 584int gfs2_qa_get(struct gfs2_inode *ip)
 585{
 586	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 587	struct inode *inode = &ip->i_inode;
 588
 589	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 590		return 0;
 591
 592	spin_lock(&inode->i_lock);
 593	if (ip->i_qadata == NULL) {
 594		struct gfs2_qadata *tmp;
 595
 596		spin_unlock(&inode->i_lock);
 597		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 598		if (!tmp)
 599			return -ENOMEM;
 600
 601		spin_lock(&inode->i_lock);
 602		if (ip->i_qadata == NULL)
 603			ip->i_qadata = tmp;
 604		else
 605			kmem_cache_free(gfs2_qadata_cachep, tmp);
 606	}
 607	ip->i_qadata->qa_ref++;
 608	spin_unlock(&inode->i_lock);
 609	return 0;
 610}
 611
 612void gfs2_qa_put(struct gfs2_inode *ip)
 613{
 614	struct inode *inode = &ip->i_inode;
 615
 616	spin_lock(&inode->i_lock);
 617	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 618		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 619		ip->i_qadata = NULL;
 620	}
 621	spin_unlock(&inode->i_lock);
 622}
 623
 624int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 625{
 626	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 627	struct gfs2_quota_data **qd;
 628	int error;
 629
 630	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 631		return 0;
 632
 633	error = gfs2_qa_get(ip);
 634	if (error)
 635		return error;
 636
 637	qd = ip->i_qadata->qa_qd;
 638
 639	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 640	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 641		error = -EIO;
 642		gfs2_qa_put(ip);
 643		goto out;
 644	}
 645
 646	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 647	if (error)
 648		goto out_unhold;
 649	ip->i_qadata->qa_qd_num++;
 650	qd++;
 651
 652	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 653	if (error)
 654		goto out_unhold;
 655	ip->i_qadata->qa_qd_num++;
 656	qd++;
 657
 658	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 659	    !uid_eq(uid, ip->i_inode.i_uid)) {
 660		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 661		if (error)
 662			goto out_unhold;
 663		ip->i_qadata->qa_qd_num++;
 664		qd++;
 665	}
 666
 667	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 668	    !gid_eq(gid, ip->i_inode.i_gid)) {
 669		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 670		if (error)
 671			goto out_unhold;
 672		ip->i_qadata->qa_qd_num++;
 673		qd++;
 674	}
 675
 676out_unhold:
 677	if (error)
 678		gfs2_quota_unhold(ip);
 679out:
 680	return error;
 681}
 682
 683void gfs2_quota_unhold(struct gfs2_inode *ip)
 684{
 685	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 686	u32 x;
 687
 688	if (ip->i_qadata == NULL)
 689		return;
 690
 691	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 692
 693	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 694		qdsb_put(ip->i_qadata->qa_qd[x]);
 695		ip->i_qadata->qa_qd[x] = NULL;
 696	}
 697	ip->i_qadata->qa_qd_num = 0;
 698	gfs2_qa_put(ip);
 699}
 700
 701static int sort_qd(const void *a, const void *b)
 702{
 703	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 704	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 705
 706	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 707		return -1;
 708	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 709		return 1;
 710	return 0;
 711}
 712
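/*
 * Record a local quota change in this node's quota change file.  With
 * QC_CHANGE, a previously clean qd gains QDF_CHANGE plus qd and slot
 * references; with QC_SYNC, the caller passes the negated synced amount
 * and the change record, along with those references, is released.
 */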
 713static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
 714{
 715	struct gfs2_sbd *sdp = qd->qd_sbd;
 716	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 717	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 718	s64 x;
 719
 720	mutex_lock(&sdp->sd_quota_mutex);
 721	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 722
 723	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 724		qc->qc_change = 0;
 725		qc->qc_flags = 0;
 726		if (qd->qd_id.type == USRQUOTA)
 727			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 728		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 729	}
 730
 731	x = be64_to_cpu(qc->qc_change) + change;
 732	qc->qc_change = cpu_to_be64(x);
 733
 734	spin_lock(&qd_lock);
 735	qd->qd_change = x;
 736	spin_unlock(&qd_lock);
 737
 738	if (qc_type == QC_CHANGE) {
 739		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 740			qd_hold(qd);
 741			slot_hold(qd);
 742		}
 743	} else {
 744		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 745		clear_bit(QDF_CHANGE, &qd->qd_flags);
 746		qc->qc_flags = 0;
 747		qc->qc_id = 0;
 748		slot_put(qd);
 749		qd_put(qd);
 750	}
 751
 752	if (change < 0) /* Reset quiet flag if we freed some blocks */
 753		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 754	mutex_unlock(&sdp->sd_quota_mutex);
 755}
 756
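/*
 * Write @bytes of @buf into the quota file's page cache at the given
 * page index and offset, mapping and reading the underlying buffers as
 * needed and adding them to the current transaction.  The folio itself
 * is only memcpy'd once all affected buffers have been set up.
 */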
 757static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
 758				  unsigned off, void *buf, unsigned bytes)
 759{
 760	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 761	struct inode *inode = &ip->i_inode;
 762	struct address_space *mapping = inode->i_mapping;
 763	struct folio *folio;
 764	struct buffer_head *bh;
 765	u64 blk;
 766	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 767	unsigned to_write = bytes, pg_off = off;
 768
 769	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 770	boff = off % bsize;
 771
 772	folio = filemap_grab_folio(mapping, index);
 773	if (IS_ERR(folio))
 774		return PTR_ERR(folio);
 775	bh = folio_buffers(folio);
 776	if (!bh)
 777		bh = create_empty_buffers(folio, bsize, 0);
 778
 779	for (;;) {
 780		/* Find the beginning block within the folio */
 781		if (pg_off >= ((bnum * bsize) + bsize)) {
 782			bh = bh->b_this_page;
 783			bnum++;
 784			blk++;
 785			continue;
 786		}
 787		if (!buffer_mapped(bh)) {
 788			gfs2_block_map(inode, blk, bh, 1);
 789			if (!buffer_mapped(bh))
 790				goto unlock_out;
 791			/* If it's a newly allocated disk block, zero it */
 792			if (buffer_new(bh))
 793				folio_zero_range(folio, bnum * bsize,
 794						bh->b_size);
 795		}
 796		if (folio_test_uptodate(folio))
 797			set_buffer_uptodate(bh);
 798		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 799			goto unlock_out;
 800		gfs2_trans_add_data(ip->i_gl, bh);
 801
 802		/* If we need to write to the next block as well */
 803		if (to_write > (bsize - boff)) {
 804			pg_off += (bsize - boff);
 805			to_write -= (bsize - boff);
 806			boff = pg_off % bsize;
 807			continue;
 808		}
 809		break;
 810	}
 811
 812	/* Write to the folio, now that we have setup the buffer(s) */
 813	memcpy_to_folio(folio, off, buf, bytes);
 814	flush_dcache_folio(folio);
 815	folio_unlock(folio);
 816	folio_put(folio);
 817
 818	return 0;
 819
 820unlock_out:
 821	folio_unlock(folio);
 822	folio_put(folio);
 823	return -EIO;
 824}
 825
 826static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
 827				 loff_t loc)
 828{
 829	unsigned long pg_beg;
 830	unsigned pg_off, nbytes, overflow = 0;
 831	int error;
 832	void *ptr;
 833
 834	nbytes = sizeof(struct gfs2_quota);
 835
 836	pg_beg = loc >> PAGE_SHIFT;
 837	pg_off = offset_in_page(loc);
 838
 839	/* If the quota straddles a page boundary, split the write in two */
 840	if ((pg_off + nbytes) > PAGE_SIZE)
 841		overflow = (pg_off + nbytes) - PAGE_SIZE;
 842
 843	ptr = qp;
 844	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
 845				       nbytes - overflow);
 846	/* If there's an overflow, write the remaining bytes to the next page */
 847	if (!error && overflow)
 848		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
 849					       ptr + nbytes - overflow,
 850					       overflow);
 851	return error;
 852}
 853
 854/**
 855 * gfs2_adjust_quota - adjust record of current block usage
 856 * @sdp: The superblock
 857 * @loc: Offset of the entry in the quota file
 858 * @change: The amount of usage change to record
 859 * @qd: The quota data
 860 * @fdq: The updated limits to record
 861 *
 862 * This function was mostly borrowed from gfs2_block_truncate_page which was
 863 * in turn mostly borrowed from ext3
 864 *
 865 * Returns: 0 or -ve on error
 866 */
 867
 868static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 869			     s64 change, struct gfs2_quota_data *qd,
 870			     struct qc_dqblk *fdq)
 871{
 872	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 873	struct inode *inode = &ip->i_inode;
 874	struct gfs2_quota q;
 875	int err;
 876	u64 size;
 877
 878	if (gfs2_is_stuffed(ip)) {
 879		err = gfs2_unstuff_dinode(ip);
 880		if (err)
 881			return err;
 882	}
 883
 884	memset(&q, 0, sizeof(struct gfs2_quota));
 885	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 886	if (err < 0)
 887		return err;
 888
 889	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 890	be64_add_cpu(&q.qu_value, change);
 891	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 892		q.qu_value = 0; /* Never go negative on quota usage */
 893	qd->qd_qb.qb_value = q.qu_value;
 894	if (fdq) {
 895		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 896			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 897			qd->qd_qb.qb_warn = q.qu_warn;
 898		}
 899		if (fdq->d_fieldmask & QC_SPC_HARD) {
 900			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 901			qd->qd_qb.qb_limit = q.qu_limit;
 902		}
 903		if (fdq->d_fieldmask & QC_SPACE) {
 904			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 905			qd->qd_qb.qb_value = q.qu_value;
 906		}
 907	}
 908
 909	err = gfs2_write_disk_quota(sdp, &q, loc);
 910	if (!err) {
 911		size = loc + sizeof(struct gfs2_quota);
 912		if (size > inode->i_size)
 913			i_size_write(inode, size);
 914		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 915		mark_inode_dirty(inode);
 916		set_bit(QDF_REFRESH, &qd->qd_flags);
 917	}
 918
 919	return err;
 920}
 921
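/*
 * Write a batch of pending quota changes to the quota file.  The quota
 * glocks are taken exclusively in sorted ID order (so concurrent syncs
 * always lock in the same order), the file updates happen in a single
 * transaction, and each synced change is then cancelled in the local
 * quota change file via do_qc(..., QC_SYNC).
 */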
 922static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 923{
 924	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
 925	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 926	struct gfs2_alloc_parms ap = {};
 927	unsigned int data_blocks, ind_blocks;
 928	struct gfs2_holder *ghs, i_gh;
 929	unsigned int qx, x;
 930	struct gfs2_quota_data *qd;
 931	unsigned reserved;
 932	loff_t offset;
 933	unsigned int nalloc = 0, blocks;
 934	int error;
 935
 936	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 937			      &data_blocks, &ind_blocks);
 938
 939	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 940	if (!ghs)
 941		return -ENOMEM;
 942
 943	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 944	inode_lock(&ip->i_inode);
 945	for (qx = 0; qx < num_qd; qx++) {
 946		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 947					   GL_NOCACHE, &ghs[qx]);
 948		if (error)
 949			goto out_dq;
 950	}
 951
 952	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 953	if (error)
 954		goto out_dq;
 955
 956	for (x = 0; x < num_qd; x++) {
 957		offset = qd2offset(qda[x]);
 958		if (gfs2_write_alloc_required(ip, offset,
 959					      sizeof(struct gfs2_quota)))
 960			nalloc++;
 961	}
 962
  963	/*
  964	 * 1 blk for unstuffing inode if stuffed. We add this extra
  965	 * block to the reservation unconditionally. If the inode
  966	 * doesn't need unstuffing, the block will be released to the
  967	 * rgrp since it won't be allocated during the transaction.
  968	 */
  969	/* +3 in the end for the unstuffing block, the inode size update
  970	 * block, and another block in case the quota straddles a page
  971	 * boundary and two blocks need to be updated instead of one */
 972	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 973
 974	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 975	ap.target = reserved;
 976	error = gfs2_inplace_reserve(ip, &ap);
 977	if (error)
 978		goto out_alloc;
 979
 980	if (nalloc)
 981		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 982
 983	error = gfs2_trans_begin(sdp, blocks, 0);
 984	if (error)
 985		goto out_ipres;
 986
 987	for (x = 0; x < num_qd; x++) {
 988		qd = qda[x];
 989		offset = qd2offset(qd);
 990		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
 991							NULL);
 992		if (error)
 993			goto out_end_trans;
 994
 995		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
 996		set_bit(QDF_REFRESH, &qd->qd_flags);
 997	}
 998
 999out_end_trans:
1000	gfs2_trans_end(sdp);
1001out_ipres:
1002	gfs2_inplace_release(ip);
1003out_alloc:
1004	gfs2_glock_dq_uninit(&i_gh);
1005out_dq:
1006	while (qx--)
1007		gfs2_glock_dq_uninit(&ghs[qx]);
1008	inode_unlock(&ip->i_inode);
1009	kfree(ghs);
1010	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
1011		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
1012	if (!error) {
1013		for (x = 0; x < num_qd; x++)
1014			qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
1015	}
1016	return error;
1017}
1018
1019static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
1020{
1021	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1022	struct gfs2_quota q;
1023	struct gfs2_quota_lvb *qlvb;
1024	loff_t pos;
1025	int error;
1026
1027	memset(&q, 0, sizeof(struct gfs2_quota));
1028	pos = qd2offset(qd);
1029	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
1030	if (error < 0)
1031		return error;
1032
1033	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1034	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1035	qlvb->__pad = 0;
1036	qlvb->qb_limit = q.qu_limit;
1037	qlvb->qb_warn = q.qu_warn;
1038	qlvb->qb_value = q.qu_value;
1039	qd->qd_qb = *qlvb;
1040
1041	return 0;
1042}
1043
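/*
 * Take the quota glock in shared mode and make sure the LVB copy of the
 * limits is valid.  When a refresh is forced or the LVB has never been
 * initialized, the glock is re-taken exclusively so the quota file can
 * be read into the LVB, and the whole sequence is restarted.
 */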
1044static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1045		    struct gfs2_holder *q_gh)
1046{
1047	struct gfs2_sbd *sdp = qd->qd_sbd;
1048	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1049	struct gfs2_holder i_gh;
1050	int error;
1051
1052	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
1053restart:
1054	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1055	if (error)
1056		return error;
1057
1058	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1059		force_refresh = FORCE;
1060
1061	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1062
1063	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1064		gfs2_glock_dq_uninit(q_gh);
1065		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1066					   GL_NOCACHE, q_gh);
1067		if (error)
1068			return error;
1069
1070		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1071		if (error)
1072			goto fail;
1073
1074		error = update_qd(sdp, qd);
1075		if (error)
1076			goto fail_gunlock;
1077
1078		gfs2_glock_dq_uninit(&i_gh);
1079		gfs2_glock_dq_uninit(q_gh);
1080		force_refresh = 0;
1081		goto restart;
1082	}
1083
1084	return 0;
1085
1086fail_gunlock:
1087	gfs2_glock_dq_uninit(&i_gh);
1088fail:
1089	gfs2_glock_dq_uninit(q_gh);
1090	return error;
1091}
1092
1093int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1094{
1095	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1096	struct gfs2_quota_data *qd;
1097	u32 x;
1098	int error;
1099
1100	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1101		return 0;
1102
1103	error = gfs2_quota_hold(ip, uid, gid);
1104	if (error)
1105		return error;
1106
1107	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1108	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1109
1110	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1111		qd = ip->i_qadata->qa_qd[x];
1112		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
1113		if (error)
1114			break;
1115	}
1116
1117	if (!error)
1118		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1119	else {
1120		while (x--)
1121			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1122		gfs2_quota_unhold(ip);
1123	}
1124
1125	return error;
1126}
1127
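/*
 * Implements the "quota_scale" heuristic described at the top of this
 * file: scale the local pending change by the number of journals (an
 * estimate of cluster-wide activity) and by quota_scale, and request a
 * sync once that projection would reach the hard limit.
 */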
1128static bool need_sync(struct gfs2_quota_data *qd)
1129{
1130	struct gfs2_sbd *sdp = qd->qd_sbd;
1131	struct gfs2_tune *gt = &sdp->sd_tune;
1132	s64 value;
1133	unsigned int num, den;
1134
1135	if (!qd->qd_qb.qb_limit)
1136		return false;
1137
1138	spin_lock(&qd_lock);
1139	value = qd->qd_change;
1140	spin_unlock(&qd_lock);
1141
1142	spin_lock(&gt->gt_spin);
1143	num = gt->gt_quota_scale_num;
1144	den = gt->gt_quota_scale_den;
1145	spin_unlock(&gt->gt_spin);
1146
1147	if (value <= 0)
1148		return false;
1149	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
1150		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1151		return false;
1152	else {
1153		value *= gfs2_jindex_size(sdp) * num;
1154		value = div_s64(value, den);
1155		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
1156		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
1157			return false;
1158	}
1159
1160	return true;
1161}
1162
1163void gfs2_quota_unlock(struct gfs2_inode *ip)
1164{
1165	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1166	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
1167	unsigned int count = 0;
1168	u32 x;
1169	int found;
1170
1171	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1172		return;
1173
1174	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1175		struct gfs2_quota_data *qd;
1176		bool sync;
1177
1178		qd = ip->i_qadata->qa_qd[x];
1179		sync = need_sync(qd);
1180
1181		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1182		if (!sync)
1183			continue;
1184
1185		spin_lock(&qd_lock);
1186		found = qd_check_sync(sdp, qd, NULL);
1187		spin_unlock(&qd_lock);
1188
1189		if (!found)
1190			continue;
1191
1192		if (!qd_bh_get_or_undo(sdp, qd))
1193			qda[count++] = qd;
1194	}
1195
1196	if (count) {
1197		do_sync(count, qda);
1198		for (x = 0; x < count; x++)
1199			qd_unlock(qda[x]);
1200	}
1201
1202	gfs2_quota_unhold(ip);
1203}
1204
1205#define MAX_LINE 256
1206
1207static void print_message(struct gfs2_quota_data *qd, char *type)
1208{
1209	struct gfs2_sbd *sdp = qd->qd_sbd;
1210
1211	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
1212		fs_info(sdp, "quota %s for %s %u\n",
1213			type,
1214			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1215			from_kqid(&init_user_ns, qd->qd_id));
1216	}
1217}
1218
1219/**
1220 * gfs2_quota_check - check if allocating new blocks will exceed quota
1221 * @ip:  The inode for which this check is being performed
1222 * @uid: The uid to check against
1223 * @gid: The gid to check against
1224 * @ap:  The allocation parameters. ap->target contains the requested
1225 *       blocks. ap->min_target, if set, contains the minimum blks
1226 *       requested.
1227 *
1228 * Returns: 0 on success.
1229 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1230 *                  quota must allow at least min_req blks for success and
1231 *                  ap->allowed is set to the number of blocks allowed
1232 *
1233 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1234 *                  of blocks available.
1235 */
1236int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1237		     struct gfs2_alloc_parms *ap)
1238{
1239	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1240	struct gfs2_quota_data *qd;
1241	s64 value, warn, limit;
1242	u32 x;
1243	int error = 0;
1244
1245	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1246	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1247		return 0;
1248
1249	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1250		qd = ip->i_qadata->qa_qd[x];
1251
1252		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1253		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1254			continue;
1255
1256		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1257		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1258		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1259		spin_lock(&qd_lock);
1260		value += qd->qd_change;
1261		spin_unlock(&qd_lock);
1262
1263		if (limit > 0 && (limit - value) < ap->allowed)
1264			ap->allowed = limit - value;
1265		/* If we can't meet the target */
1266		if (limit && limit < (value + (s64)ap->target)) {
1267			/* If no min_target specified or we don't meet
1268			 * min_target, return -EDQUOT */
1269			if (!ap->min_target || ap->min_target > ap->allowed) {
1270				if (!test_and_set_bit(QDF_QMSG_QUIET,
1271						      &qd->qd_flags)) {
1272					print_message(qd, "exceeded");
1273					quota_send_warning(qd->qd_id,
1274							   sdp->sd_vfs->s_dev,
1275							   QUOTA_NL_BHARDWARN);
1276				}
1277				error = -EDQUOT;
1278				break;
1279			}
1280		} else if (warn && warn < value &&
1281			   time_after_eq(jiffies, qd->qd_last_warn +
1282					 gfs2_tune_get(sdp, gt_quota_warn_period)
1283					 * HZ)) {
1284			quota_send_warning(qd->qd_id,
1285					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1286			print_message(qd, "warning");
1287			error = 0;
1288			qd->qd_last_warn = jiffies;
1289		}
1290	}
1291	return error;
1292}
1293
1294void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1295		       kuid_t uid, kgid_t gid)
1296{
1297	struct gfs2_quota_data *qd;
1298	u32 x;
1299	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1300
1301	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
1302	    gfs2_assert_warn(sdp, change))
1303		return;
1304	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1305		return;
1306
1307	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1308				 ip->i_qadata->qa_ref > 0))
1309		return;
1310	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1311		qd = ip->i_qadata->qa_qd[x];
1312
1313		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1314		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1315			do_qc(qd, change, QC_CHANGE);
1316		}
1317	}
1318}
1319
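/*
 * Quick check used by gfs2_quota_sync: returns true if any quota data
 * object on this filesystem has a pending change that is not already
 * being synced.
 */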
1320static bool qd_changed(struct gfs2_sbd *sdp)
1321{
1322	struct gfs2_quota_data *qd;
1323	bool changed = false;
1324
1325	spin_lock(&qd_lock);
1326	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1327		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
1328		    !test_bit(QDF_CHANGE, &qd->qd_flags))
1329			continue;
1330
1331		changed = true;
1332		break;
1333	}
1334	spin_unlock(&qd_lock);
1335	return changed;
1336}
1337
1338int gfs2_quota_sync(struct super_block *sb, int type)
1339{
1340	struct gfs2_sbd *sdp = sb->s_fs_info;
1341	struct gfs2_quota_data **qda;
1342	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1343	unsigned int num_qd;
1344	unsigned int x;
1345	int error = 0;
1346
1347	if (!qd_changed(sdp))
1348		return 0;
1349
1350	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1351	if (!qda)
1352		return -ENOMEM;
1353
1354	mutex_lock(&sdp->sd_quota_sync_mutex);
1355	sdp->sd_quota_sync_gen++;
1356
1357	do {
1358		num_qd = 0;
1359
1360		for (;;) {
1361			error = qd_fish(sdp, qda + num_qd);
1362			if (error || !qda[num_qd])
1363				break;
1364			if (++num_qd == max_qd)
1365				break;
1366		}
1367
1368		if (num_qd) {
1369			if (!error)
1370				error = do_sync(num_qd, qda);
1371
1372			for (x = 0; x < num_qd; x++)
1373				qd_unlock(qda[x]);
1374		}
1375	} while (!error && num_qd == max_qd);
1376
1377	mutex_unlock(&sdp->sd_quota_sync_mutex);
1378	kfree(qda);
1379
1380	return error;
1381}
1382
1383int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1384{
1385	struct gfs2_quota_data *qd;
1386	struct gfs2_holder q_gh;
1387	int error;
1388
1389	error = qd_get(sdp, qid, &qd);
1390	if (error)
1391		return error;
1392
1393	error = do_glock(qd, FORCE, &q_gh);
1394	if (!error)
1395		gfs2_glock_dq_uninit(&q_gh);
1396
1397	qd_put(qd);
1398	return error;
1399}
1400
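/*
 * Called at mount time: scan this node's quota change file and rebuild
 * the in-memory quota data objects and slot bitmap for any changes that
 * were not synced before the last unmount or crash.
 */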
1401int gfs2_quota_init(struct gfs2_sbd *sdp)
1402{
1403	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1404	u64 size = i_size_read(sdp->sd_qc_inode);
1405	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1406	unsigned int x, slot = 0;
1407	unsigned int found = 0;
1408	unsigned int hash;
1409	unsigned int bm_size;
1410	u64 dblock;
1411	u32 extlen = 0;
1412	int error;
1413
1414	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1415		return -EIO;
1416
1417	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1418	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1419	bm_size *= sizeof(unsigned long);
1420	error = -ENOMEM;
1421	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1422	if (sdp->sd_quota_bitmap == NULL)
1423		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1424						 __GFP_ZERO);
1425	if (!sdp->sd_quota_bitmap)
1426		return error;
1427
1428	for (x = 0; x < blocks; x++) {
1429		struct buffer_head *bh;
1430		const struct gfs2_quota_change *qc;
1431		unsigned int y;
1432
1433		if (!extlen) {
1434			extlen = 32;
1435			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1436			if (error)
1437				goto fail;
1438		}
1439		error = -EIO;
1440		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1441		if (!bh)
1442			goto fail;
1443		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1444			brelse(bh);
1445			goto fail;
1446		}
1447
1448		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1449		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1450		     y++, slot++) {
1451			struct gfs2_quota_data *qd;
1452			s64 qc_change = be64_to_cpu(qc->qc_change);
1453			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1454			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1455						USRQUOTA : GRPQUOTA;
1456			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1457						      be32_to_cpu(qc->qc_id));
1458			qc++;
1459			if (!qc_change)
1460				continue;
1461
1462			hash = gfs2_qd_hash(sdp, qc_id);
1463			qd = qd_alloc(hash, sdp, qc_id);
1464			if (qd == NULL) {
1465				brelse(bh);
1466				goto fail;
1467			}
1468
1469			set_bit(QDF_CHANGE, &qd->qd_flags);
1470			qd->qd_change = qc_change;
1471			qd->qd_slot = slot;
1472			qd->qd_slot_ref = 1;
1473
1474			spin_lock(&qd_lock);
1475			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1476			list_add(&qd->qd_list, &sdp->sd_quota_list);
1477			atomic_inc(&sdp->sd_quota_count);
1478			spin_unlock(&qd_lock);
1479
1480			spin_lock_bucket(hash);
1481			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1482			spin_unlock_bucket(hash);
1483
1484			found++;
1485		}
1486
1487		brelse(bh);
1488		dblock++;
1489		extlen--;
1490	}
1491
1492	if (found)
1493		fs_info(sdp, "found %u quota changes\n", found);
1494
1495	return 0;
1496
1497fail:
1498	gfs2_quota_cleanup(sdp);
1499	return error;
1500}
1501
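/*
 * Tear down all unreferenced quota data objects for this filesystem at
 * unmount (or after a withdraw), wait for the outstanding RCU frees to
 * finish, and then release the slot bitmap.
 */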
1502void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1503{
1504	struct gfs2_quota_data *qd;
1505	LIST_HEAD(dispose);
1506	int count;
1507
1508	BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
1509		test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
1510
1511	spin_lock(&qd_lock);
1512	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1513		spin_lock(&qd->qd_lockref.lock);
1514		if (qd->qd_lockref.count != 0) {
1515			spin_unlock(&qd->qd_lockref.lock);
1516			continue;
1517		}
1518		lockref_mark_dead(&qd->qd_lockref);
1519		spin_unlock(&qd->qd_lockref.lock);
1520
1521		list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
1522		list_add(&qd->qd_lru, &dispose);
1523	}
1524	spin_unlock(&qd_lock);
1525
1526	gfs2_qd_list_dispose(&dispose);
1527
1528	wait_event_timeout(sdp->sd_kill_wait,
1529		(count = atomic_read(&sdp->sd_quota_count)) == 0,
1530		HZ * 60);
1531
1532	if (count != 0)
1533		fs_err(sdp, "%d left-over quota data objects\n", count);
1534
1535	kvfree(sdp->sd_quota_bitmap);
1536	sdp->sd_quota_bitmap = NULL;
1537}
1538
1539static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1540{
1541	if (error == 0 || error == -EROFS)
1542		return;
1543	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
1544		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1545			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1546		wake_up(&sdp->sd_logd_waitq);
1547	}
1548}
1549
1550static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1551			       int (*fxn)(struct super_block *sb, int type),
1552			       unsigned long t, unsigned long *timeo,
1553			       unsigned int *new_timeo)
1554{
1555	if (t >= *timeo) {
1556		int error = fxn(sdp->sd_vfs, 0);
1557		quotad_error(sdp, msg, error);
1558		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1559	} else {
1560		*timeo -= t;
1561	}
1562}
1563
1564void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1565	if (!sdp->sd_statfs_force_sync) {
1566		sdp->sd_statfs_force_sync = 1;
1567		wake_up(&sdp->sd_quota_wait);
1568	}
1569}
1570
1571
1572/**
1573 * gfs2_quotad - Write cached quota changes into the quota file
1574 * @data: Pointer to GFS2 superblock
1575 *
1576 */
1577
1578int gfs2_quotad(void *data)
1579{
1580	struct gfs2_sbd *sdp = data;
1581	struct gfs2_tune *tune = &sdp->sd_tune;
1582	unsigned long statfs_timeo = 0;
1583	unsigned long quotad_timeo = 0;
1584	unsigned long t = 0;
1585
1586	set_freezable();
1587	while (!kthread_should_stop()) {
1588		if (gfs2_withdrawing_or_withdrawn(sdp))
1589			break;
1590
1591		/* Update the master statfs file */
1592		if (sdp->sd_statfs_force_sync) {
1593			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1594			quotad_error(sdp, "statfs", error);
1595			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1596		}
1597		else
1598			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1599				   	   &statfs_timeo,
1600					   &tune->gt_statfs_quantum);
1601
1602		/* Update quota file */
1603		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1604				   &quotad_timeo, &tune->gt_quota_quantum);
1605
1606		t = min(quotad_timeo, statfs_timeo);
1607
1608		t = wait_event_freezable_timeout(sdp->sd_quota_wait,
1609				sdp->sd_statfs_force_sync ||
1610				gfs2_withdrawing_or_withdrawn(sdp) ||
1611				kthread_should_stop(),
1612				t);
1613
1614		if (sdp->sd_statfs_force_sync)
1615			t = 0;
1616	}
1617
1618	return 0;
1619}
1620
1621static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1622{
1623	struct gfs2_sbd *sdp = sb->s_fs_info;
1624
1625	memset(state, 0, sizeof(*state));
1626
1627	switch (sdp->sd_args.ar_quota) {
1628	case GFS2_QUOTA_QUIET:
1629		fallthrough;
1630	case GFS2_QUOTA_ON:
1631		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1632		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1633		fallthrough;
1634	case GFS2_QUOTA_ACCOUNT:
1635		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1636						  QCI_SYSFILE;
1637		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1638						  QCI_SYSFILE;
1639		break;
1640	case GFS2_QUOTA_OFF:
1641		break;
1642	}
1643	if (sdp->sd_quota_inode) {
1644		state->s_state[USRQUOTA].ino =
1645					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1646		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1647	}
1648	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1649	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1650	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1651	return 0;
1652}
1653
1654static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1655			  struct qc_dqblk *fdq)
1656{
1657	struct gfs2_sbd *sdp = sb->s_fs_info;
1658	struct gfs2_quota_lvb *qlvb;
1659	struct gfs2_quota_data *qd;
1660	struct gfs2_holder q_gh;
1661	int error;
1662
1663	memset(fdq, 0, sizeof(*fdq));
1664
1665	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1666		return -ESRCH; /* Crazy XFS error code */
1667
1668	if ((qid.type != USRQUOTA) &&
1669	    (qid.type != GRPQUOTA))
1670		return -EINVAL;
1671
1672	error = qd_get(sdp, qid, &qd);
1673	if (error)
1674		return error;
1675	error = do_glock(qd, FORCE, &q_gh);
1676	if (error)
1677		goto out;
1678
1679	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1680	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1681	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1682	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1683
1684	gfs2_glock_dq_uninit(&q_gh);
1685out:
1686	qd_put(qd);
1687	return error;
1688}
1689
1690/* GFS2 only supports a subset of the XFS fields */
1691#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1692
1693static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1694			  struct qc_dqblk *fdq)
1695{
1696	struct gfs2_sbd *sdp = sb->s_fs_info;
1697	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1698	struct gfs2_quota_data *qd;
1699	struct gfs2_holder q_gh, i_gh;
1700	unsigned int data_blocks, ind_blocks;
1701	unsigned int blocks = 0;
1702	int alloc_required;
1703	loff_t offset;
1704	int error;
1705
1706	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1707		return -ESRCH; /* Crazy XFS error code */
1708
1709	if ((qid.type != USRQUOTA) &&
1710	    (qid.type != GRPQUOTA))
1711		return -EINVAL;
1712
1713	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1714		return -EINVAL;
1715
1716	error = qd_get(sdp, qid, &qd);
1717	if (error)
1718		return error;
1719
1720	error = gfs2_qa_get(ip);
1721	if (error)
1722		goto out_put;
1723
1724	inode_lock(&ip->i_inode);
1725	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1726	if (error)
1727		goto out_unlockput;
1728	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1729	if (error)
1730		goto out_q;
1731
1732	/* Check for existing entry, if none then alloc new blocks */
1733	error = update_qd(sdp, qd);
1734	if (error)
1735		goto out_i;
1736
1737	/* If nothing has changed, this is a no-op */
1738	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1739	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1740		fdq->d_fieldmask ^= QC_SPC_SOFT;
1741
1742	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1743	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1744		fdq->d_fieldmask ^= QC_SPC_HARD;
1745
1746	if ((fdq->d_fieldmask & QC_SPACE) &&
1747	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1748		fdq->d_fieldmask ^= QC_SPACE;
1749
1750	if (fdq->d_fieldmask == 0)
1751		goto out_i;
1752
1753	offset = qd2offset(qd);
1754	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1755	if (gfs2_is_stuffed(ip))
1756		alloc_required = 1;
1757	if (alloc_required) {
1758		struct gfs2_alloc_parms ap = {};
1759		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1760				       &data_blocks, &ind_blocks);
1761		blocks = 1 + data_blocks + ind_blocks;
1762		ap.target = blocks;
1763		error = gfs2_inplace_reserve(ip, &ap);
1764		if (error)
1765			goto out_i;
1766		blocks += gfs2_rg_blocks(ip, blocks);
1767	}
1768
1769	/* Some quotas span block boundaries and can update two blocks,
1770	   adding an extra block to the transaction to handle such quotas */
1771	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1772	if (error)
1773		goto out_release;
1774
1775	/* Apply changes */
1776	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
1777	if (!error)
1778		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1779
1780	gfs2_trans_end(sdp);
1781out_release:
1782	if (alloc_required)
1783		gfs2_inplace_release(ip);
1784out_i:
1785	gfs2_glock_dq_uninit(&i_gh);
1786out_q:
1787	gfs2_glock_dq_uninit(&q_gh);
1788out_unlockput:
1789	gfs2_qa_put(ip);
1790	inode_unlock(&ip->i_inode);
1791out_put:
1792	qd_put(qd);
1793	return error;
1794}
1795
1796const struct quotactl_ops gfs2_quotactl_ops = {
1797	.quota_sync     = gfs2_quota_sync,
1798	.get_state	= gfs2_quota_get_state,
1799	.get_dqblk	= gfs2_get_dqblk,
1800	.set_dqblk	= gfs2_set_dqblk,
1801};
1802
1803void __init gfs2_quota_hash_init(void)
1804{
1805	unsigned i;
1806
1807	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1808		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1809}