v3.1
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10/*
  11 * Quota change tags are associated with each transaction that allocates or
  12 * deallocates space.  Those changes are accumulated locally to each node (in a
  13 * per-node file) and then are periodically synced to the quota file.  This
  14 * avoids the bottleneck of constantly touching the quota file, but introduces
  15 * fuzziness in the current usage value of IDs that are being used on different
  16 * nodes in the cluster simultaneously.  So, it is possible for a user on
  17 * multiple nodes to overrun their quota, but that overrun is controllable.
  18 * Since quota tags are part of transactions, there is no need for a quota check
  19 * program to be run on node crashes or anything like that.
  20 *
  21 * There are a couple of knobs that let the administrator manage the quota
  22 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  23 * sitting on one node before being synced to the quota file.  (The default is
  24 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  25 * of quota file syncs increases as the user moves closer to their limit.  The
  26 * more frequent the syncs, the more accurate the quota enforcement, but that
  27 * means that there is more contention between the nodes for the quota file.
  28 * The default value is one.  This sets the maximum theoretical quota overrun
  29 * (with an infinite number of nodes with infinite bandwidth) to twice the user's limit.  (In
  30 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  31 * number greater than one makes quota syncs more frequent and reduces the
  32 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  33 * syncs less frequent.
  34 *
  35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  36 * the quota file, so it is not being constantly read.
  37 */
  38
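The sync heuristic described in the comment above is implemented by need_sync() later in this file: the locally accumulated change is scaled by the number of journals (one per node) and by the quota_scale fraction before being compared with the limit. The snippet below is an illustrative, user-space style restatement of that decision; the function name would_sync and its plain integer parameters are hypothetical and are not part of quota.c.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch of the need_sync() decision, not kernel code. */
static bool would_sync(int64_t local_change, int64_t synced_value,
                       int64_t limit, unsigned int journals,
                       unsigned int scale_num, unsigned int scale_den)
{
	if (limit == 0 || local_change < 0)
		return false;	/* no limit set, or space was freed */
	if (synced_value >= limit)
		return false;	/* already at or over the limit on disk */
	/* Assume every journal (node) could be allocating at this rate. */
	int64_t projected = synced_value +
		local_change * journals * scale_num / scale_den;
	return projected >= limit;
}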
  39#include <linux/sched.h>
  40#include <linux/slab.h>
  41#include <linux/mm.h>
  42#include <linux/spinlock.h>
  43#include <linux/completion.h>
  44#include <linux/buffer_head.h>
  45#include <linux/sort.h>
  46#include <linux/fs.h>
  47#include <linux/bio.h>
  48#include <linux/gfs2_ondisk.h>
  49#include <linux/kthread.h>
  50#include <linux/freezer.h>
  51#include <linux/quota.h>
  52#include <linux/dqblk_xfs.h>
  53
  54#include "gfs2.h"
  55#include "incore.h"
  56#include "bmap.h"
  57#include "glock.h"
  58#include "glops.h"
  59#include "log.h"
  60#include "meta_io.h"
  61#include "quota.h"
  62#include "rgrp.h"
  63#include "super.h"
  64#include "trans.h"
  65#include "inode.h"
  66#include "util.h"
  67
  68#define QUOTA_USER 1
  69#define QUOTA_GROUP 0
 
  70
  71struct gfs2_quota_change_host {
  72	u64 qc_change;
  73	u32 qc_flags; /* GFS2_QCF_... */
  74	u32 qc_id;
  75};
  76
  77static LIST_HEAD(qd_lru_list);
  78static atomic_t qd_lru_count = ATOMIC_INIT(0);
  79static DEFINE_SPINLOCK(qd_lru_lock);
  80
  81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
 
  82{
  83	struct gfs2_quota_data *qd;
  84	struct gfs2_sbd *sdp;
  85	int nr_to_scan = sc->nr_to_scan;
  86
  87	if (nr_to_scan == 0)
  88		goto out;
  89
  90	if (!(sc->gfp_mask & __GFP_FS))
  91		return -1;
  92
  93	spin_lock(&qd_lru_lock);
  94	while (nr_to_scan && !list_empty(&qd_lru_list)) {
  95		qd = list_entry(qd_lru_list.next,
  96				struct gfs2_quota_data, qd_reclaim);
  97		sdp = qd->qd_gl->gl_sbd;
  98
  99		/* Free from the filesystem-specific list */
 100		list_del(&qd->qd_list);
 
 101
 
 102		gfs2_assert_warn(sdp, !qd->qd_change);
 103		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 104		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 105
 106		gfs2_glock_put(qd->qd_gl);
 107		atomic_dec(&sdp->sd_quota_count);
 
 108
 109		/* Delete it from the common reclaim list */
 110		list_del_init(&qd->qd_reclaim);
 111		atomic_dec(&qd_lru_count);
 112		spin_unlock(&qd_lru_lock);
 113		kmem_cache_free(gfs2_quotad_cachep, qd);
 114		spin_lock(&qd_lru_lock);
 115		nr_to_scan--;
 116	}
 117	spin_unlock(&qd_lru_lock);
 118
 119out:
 120	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
 121}
 122
 123static u64 qd2offset(struct gfs2_quota_data *qd)
 
 124{
 125	u64 offset;
 126
 127	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
 128	offset *= sizeof(struct gfs2_quota);
 129
 130	return offset;
 131}
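As qd2offset() shows, the quota file interleaves user and group records: the record for a numeric ID sits at index 2 * id for users and 2 * id + 1 for groups, each sizeof(struct gfs2_quota) bytes wide. A small illustrative computation (the id value 1000 is only an example, not taken from the source):

	/* Illustrative only: where the records for id 1000 live on disk. */
	u64 user_off  = (2 * 1000ULL + 0) * sizeof(struct gfs2_quota);
	u64 group_off = (2 * 1000ULL + 1) * sizeof(struct gfs2_quota);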
 132
 133static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
 134		    struct gfs2_quota_data **qdp)
 135{
 136	struct gfs2_quota_data *qd;
 137	int error;
 138
 139	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 140	if (!qd)
 141		return -ENOMEM;
 142
 143	atomic_set(&qd->qd_count, 1);
 144	qd->qd_id = id;
 145	if (user)
 146		set_bit(QDF_USER, &qd->qd_flags);
 147	qd->qd_slot = -1;
 148	INIT_LIST_HEAD(&qd->qd_reclaim);
 
 149
 150	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
 151			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 152	if (error)
 153		goto fail;
 154
 155	*qdp = qd;
 156
 157	return 0;
 158
 159fail:
 160	kmem_cache_free(gfs2_quotad_cachep, qd);
 161	return error;
 162}
 163
 164static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 165		  struct gfs2_quota_data **qdp)
 
 166{
 167	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
 168	int error, found;
 169
 170	*qdp = NULL;
 171
 172	for (;;) {
 173		found = 0;
 174		spin_lock(&qd_lru_lock);
 175		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 176			if (qd->qd_id == id &&
 177			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
 178				if (!atomic_read(&qd->qd_count) &&
 179				    !list_empty(&qd->qd_reclaim)) {
 180					/* Remove it from reclaim list */
 181					list_del_init(&qd->qd_reclaim);
 182					atomic_dec(&qd_lru_count);
 183				}
 184				atomic_inc(&qd->qd_count);
 185				found = 1;
 186				break;
 187			}
 188		}
 
 189
 190		if (!found)
 191			qd = NULL;
 192
 193		if (!qd && new_qd) {
 194			qd = new_qd;
 195			list_add(&qd->qd_list, &sdp->sd_quota_list);
 196			atomic_inc(&sdp->sd_quota_count);
 197			new_qd = NULL;
 198		}
 199
 200		spin_unlock(&qd_lru_lock);
 201
 202		if (qd) {
 203			if (new_qd) {
 204				gfs2_glock_put(new_qd->qd_gl);
 205				kmem_cache_free(gfs2_quotad_cachep, new_qd);
 206			}
 207			*qdp = qd;
 208			return 0;
 209		}
 210
 211		error = qd_alloc(sdp, user, id, &new_qd);
 212		if (error)
 213			return error;
 214	}
 215}
 216
 217static void qd_hold(struct gfs2_quota_data *qd)
 
 218{
 219	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 220	gfs2_assert(sdp, atomic_read(&qd->qd_count));
 221	atomic_inc(&qd->qd_count);
 222}
 223
 224static void qd_put(struct gfs2_quota_data *qd)
 225{
 226	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
 227		/* Add to the reclaim list */
 228		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
 229		atomic_inc(&qd_lru_count);
 230		spin_unlock(&qd_lru_lock);
 231	}
 232}
 233
 234static int slot_get(struct gfs2_quota_data *qd)
 235{
 236	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 237	unsigned int c, o = 0, b;
 238	unsigned char byte = 0;
 239
 240	spin_lock(&qd_lru_lock);
 241
 242	if (qd->qd_slot_count++) {
 243		spin_unlock(&qd_lru_lock);
 244		return 0;
 245	}
 246
 247	for (c = 0; c < sdp->sd_quota_chunks; c++)
 248		for (o = 0; o < PAGE_SIZE; o++) {
 249			byte = sdp->sd_quota_bitmap[c][o];
 250			if (byte != 0xFF)
 251				goto found;
 252		}
 253
 254	goto fail;
 255
 256found:
 257	for (b = 0; b < 8; b++)
 258		if (!(byte & (1 << b)))
 259			break;
 260	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
 261
 262	if (qd->qd_slot >= sdp->sd_quota_slots)
 263		goto fail;
 264
 265	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 266
 267	spin_unlock(&qd_lru_lock);
 268
 269	return 0;
 270
 271fail:
 272	qd->qd_slot_count--;
 273	spin_unlock(&qd_lru_lock);
 274	return -ENOSPC;
 275}
 276
 277static void slot_hold(struct gfs2_quota_data *qd)
 278{
 279	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 280
 281	spin_lock(&qd_lru_lock);
 282	gfs2_assert(sdp, qd->qd_slot_count);
 283	qd->qd_slot_count++;
 284	spin_unlock(&qd_lru_lock);
 285}
 286
 287static void slot_put(struct gfs2_quota_data *qd)
 288{
 289	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 290
 291	spin_lock(&qd_lru_lock);
 292	gfs2_assert(sdp, qd->qd_slot_count);
 293	if (!--qd->qd_slot_count) {
 294		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 295		qd->qd_slot = -1;
 296	}
 297	spin_unlock(&qd_lru_lock);
 298}
 299
 300static int bh_get(struct gfs2_quota_data *qd)
 301{
 302	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 303	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 
 304	unsigned int block, offset;
 305	struct buffer_head *bh;
 
 306	int error;
 307	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 308
 309	mutex_lock(&sdp->sd_quota_mutex);
 310
 311	if (qd->qd_bh_count++) {
 312		mutex_unlock(&sdp->sd_quota_mutex);
 313		return 0;
 314	}
 
 315
 316	block = qd->qd_slot / sdp->sd_qc_per_block;
 317	offset = qd->qd_slot % sdp->sd_qc_per_block;
 318
 319	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
 320	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
 
 321	if (error)
 322		goto fail;
 323	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
 324	if (error)
 325		goto fail;
 326	error = -EIO;
 327	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 328		goto fail_brelse;
 329
 330	qd->qd_bh = bh;
 331	qd->qd_bh_qc = (struct gfs2_quota_change *)
 332		(bh->b_data + sizeof(struct gfs2_meta_header) +
 333		 offset * sizeof(struct gfs2_quota_change));
 334
 335	mutex_unlock(&sdp->sd_quota_mutex);
 336
 337	return 0;
 338
 339fail_brelse:
 340	brelse(bh);
 341fail:
 342	qd->qd_bh_count--;
 343	mutex_unlock(&sdp->sd_quota_mutex);
 344	return error;
 345}
 346
 347static void bh_put(struct gfs2_quota_data *qd)
 348{
 349	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
 350
 351	mutex_lock(&sdp->sd_quota_mutex);
 352	gfs2_assert(sdp, qd->qd_bh_count);
 353	if (!--qd->qd_bh_count) {
 354		brelse(qd->qd_bh);
 355		qd->qd_bh = NULL;
 356		qd->qd_bh_qc = NULL;
 357	}
 358	mutex_unlock(&sdp->sd_quota_mutex);
 359}
 360
 361static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 362{
 363	struct gfs2_quota_data *qd = NULL;
 364	int error;
 365	int found = 0;
 366
 367	*qdp = NULL;
 368
 369	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 370		return 0;
 371
 372	spin_lock(&qd_lru_lock);
 373
 374	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 375		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 376		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 377		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
 378			continue;
 379
 380		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 381
 382		set_bit(QDF_LOCKED, &qd->qd_flags);
 383		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 384		atomic_inc(&qd->qd_count);
 385		qd->qd_change_sync = qd->qd_change;
 386		gfs2_assert_warn(sdp, qd->qd_slot_count);
 387		qd->qd_slot_count++;
 388		found = 1;
 389
 390		break;
 391	}
 392
 393	if (!found)
 394		qd = NULL;
 395
 396	spin_unlock(&qd_lru_lock);
 397
 398	if (qd) {
 399		gfs2_assert_warn(sdp, qd->qd_change_sync);
 400		error = bh_get(qd);
 401		if (error) {
 402			clear_bit(QDF_LOCKED, &qd->qd_flags);
 403			slot_put(qd);
 404			qd_put(qd);
 405			return error;
 406		}
 407	}
 408
 409	*qdp = qd;
 410
 411	return 0;
 412}
 413
 414static int qd_trylock(struct gfs2_quota_data *qd)
 
 415{
 416	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 417
 418	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 419		return 0;
 420
 421	spin_lock(&qd_lru_lock);
 422
 
 423	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 424	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 425		spin_unlock(&qd_lru_lock);
 426		return 0;
 427	}
 428
 429	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 430
 
 431	set_bit(QDF_LOCKED, &qd->qd_flags);
 432	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
 433	atomic_inc(&qd->qd_count);
 434	qd->qd_change_sync = qd->qd_change;
 435	gfs2_assert_warn(sdp, qd->qd_slot_count);
 436	qd->qd_slot_count++;
 437
 438	spin_unlock(&qd_lru_lock);
 439
 440	gfs2_assert_warn(sdp, qd->qd_change_sync);
 441	if (bh_get(qd)) {
 442		clear_bit(QDF_LOCKED, &qd->qd_flags);
 443		slot_put(qd);
 444		qd_put(qd);
 445		return 0;
 446	}
 447
 448	return 1;
 449}
 450
 451static void qd_unlock(struct gfs2_quota_data *qd)
 452{
 453	gfs2_assert_warn(qd->qd_gl->gl_sbd,
 454			 test_bit(QDF_LOCKED, &qd->qd_flags));
 455	clear_bit(QDF_LOCKED, &qd->qd_flags);
 456	bh_put(qd);
 457	slot_put(qd);
 458	qd_put(qd);
 459}
 460
 461static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
 462		    struct gfs2_quota_data **qdp)
 463{
 464	int error;
 465
 466	error = qd_get(sdp, user, id, qdp);
 467	if (error)
 468		return error;
 469
 470	error = slot_get(*qdp);
 471	if (error)
 472		goto fail;
 473
 474	error = bh_get(*qdp);
 475	if (error)
 476		goto fail_slot;
 477
 478	return 0;
 479
 480fail_slot:
 481	slot_put(*qdp);
 482fail:
 483	qd_put(*qdp);
 484	return error;
 485}
 486
 487static void qdsb_put(struct gfs2_quota_data *qd)
 488{
 489	bh_put(qd);
 490	slot_put(qd);
 491	qd_put(qd);
 492}
 493
 494int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 495{
 496	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 497	struct gfs2_alloc *al = ip->i_alloc;
 498	struct gfs2_quota_data **qd = al->al_qd;
 499	int error;
 500
 501	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
 502	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
 503		return -EIO;
 504
 505	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 506		return 0;
 507
 508	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
 509	if (error)
 510		goto out;
 511	al->al_qd_num++;
 512	qd++;
 513
 514	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
 515	if (error)
 516		goto out;
 517	al->al_qd_num++;
 518	qd++;
 519
 520	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
 521		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
 
 522		if (error)
 523			goto out;
 524		al->al_qd_num++;
 525		qd++;
 526	}
 527
 528	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
 529		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
 
 530		if (error)
 531			goto out;
 532		al->al_qd_num++;
 533		qd++;
 534	}
 535
 536out:
 537	if (error)
 538		gfs2_quota_unhold(ip);
 
 539	return error;
 540}
 541
 542void gfs2_quota_unhold(struct gfs2_inode *ip)
 543{
 544	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 545	struct gfs2_alloc *al = ip->i_alloc;
 546	unsigned int x;
 547
 548	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 549
 550	for (x = 0; x < al->al_qd_num; x++) {
 551		qdsb_put(al->al_qd[x]);
 552		al->al_qd[x] = NULL;
 553	}
 554	al->al_qd_num = 0;
 
 555}
 556
 557static int sort_qd(const void *a, const void *b)
 558{
 559	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 560	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 561
 562	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
 563	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
 564		if (test_bit(QDF_USER, &qd_a->qd_flags))
 565			return -1;
 566		else
 567			return 1;
 568	}
 569	if (qd_a->qd_id < qd_b->qd_id)
 570		return -1;
 571	if (qd_a->qd_id > qd_b->qd_id)
 572		return 1;
 573
 574	return 0;
 575}
 576
 577static void do_qc(struct gfs2_quota_data *qd, s64 change)
 578{
 579	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 580	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 581	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 
 582	s64 x;
 583
 584	mutex_lock(&sdp->sd_quota_mutex);
 585	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
 586
 587	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
 588		qc->qc_change = 0;
 589		qc->qc_flags = 0;
 590		if (test_bit(QDF_USER, &qd->qd_flags))
 591			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 592		qc->qc_id = cpu_to_be32(qd->qd_id);
 593	}
 594
 595	x = be64_to_cpu(qc->qc_change) + change;
 596	qc->qc_change = cpu_to_be64(x);
 597
 598	spin_lock(&qd_lru_lock);
 599	qd->qd_change = x;
 600	spin_unlock(&qd_lru_lock);
 601
 602	if (!x) {
 603		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 604		clear_bit(QDF_CHANGE, &qd->qd_flags);
 605		qc->qc_flags = 0;
 606		qc->qc_id = 0;
 607		slot_put(qd);
 608		qd_put(qd);
 609	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
 610		qd_hold(qd);
 611		slot_hold(qd);
 612	}
 613
 614	mutex_unlock(&sdp->sd_quota_mutex);
 615}
 616
 617/**
 618 * gfs2_adjust_quota - adjust record of current block usage
 619 * @ip: The quota inode
 620 * @loc: Offset of the entry in the quota file
 621 * @change: The amount of usage change to record
 622 * @qd: The quota data
 623 * @fdq: The updated limits to record
 624 *
 625 * This function was mostly borrowed from gfs2_block_truncate_page which was
 626 * in turn mostly borrowed from ext3
 627 *
 628 * Returns: 0 or -ve on error
 629 */
 630
 631static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 632			     s64 change, struct gfs2_quota_data *qd,
 633			     struct fs_disk_quota *fdq)
 634{
 
 635	struct inode *inode = &ip->i_inode;
 636	struct gfs2_sbd *sdp = GFS2_SB(inode);
 637	struct address_space *mapping = inode->i_mapping;
 638	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 639	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 640	unsigned blocksize, iblock, pos;
 641	struct buffer_head *bh, *dibh;
 642	struct page *page;
 643	void *kaddr, *ptr;
 644	struct gfs2_quota q, *qp;
 645	int err, nbytes;
 646	u64 size;
 647
 648	if (gfs2_is_stuffed(ip))
 649		gfs2_unstuff_dinode(ip, NULL);
 650
 651	memset(&q, 0, sizeof(struct gfs2_quota));
 652	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
 653	if (err < 0)
 654		return err;
 655
 656	err = -EIO;
 657	qp = &q;
 658	qp->qu_value = be64_to_cpu(qp->qu_value);
 659	qp->qu_value += change;
 660	qp->qu_value = cpu_to_be64(qp->qu_value);
 661	qd->qd_qb.qb_value = qp->qu_value;
 662	if (fdq) {
 663		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
 664			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
 665			qd->qd_qb.qb_warn = qp->qu_warn;
 666		}
 667		if (fdq->d_fieldmask & FS_DQ_BHARD) {
 668			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
 669			qd->qd_qb.qb_limit = qp->qu_limit;
 670		}
 671		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
 672			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
 673			qd->qd_qb.qb_value = qp->qu_value;
 674		}
 675	}
 
 676
 677	/* Write the quota into the quota file on disk */
 678	ptr = qp;
 679	nbytes = sizeof(struct gfs2_quota);
 680get_a_page:
 681	page = grab_cache_page(mapping, index);
 682	if (!page)
 683		return -ENOMEM;
 684
 685	blocksize = inode->i_sb->s_blocksize;
 686	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 687
 688	if (!page_has_buffers(page))
 689		create_empty_buffers(page, blocksize, 0);
 690
 691	bh = page_buffers(page);
 692	pos = blocksize;
 693	while (offset >= pos) {
 694		bh = bh->b_this_page;
 695		iblock++;
 696		pos += blocksize;
 697	}
 698
 699	if (!buffer_mapped(bh)) {
 700		gfs2_block_map(inode, iblock, bh, 1);
 701		if (!buffer_mapped(bh))
 702			goto unlock_out;
 703		/* If it's a newly allocated disk block for quota, zero it */
 704		if (buffer_new(bh))
 705			zero_user(page, pos - blocksize, bh->b_size);
 706	}
 707
 708	if (PageUptodate(page))
 709		set_buffer_uptodate(bh);
 710
 711	if (!buffer_uptodate(bh)) {
 712		ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 713		wait_on_buffer(bh);
 714		if (!buffer_uptodate(bh))
 715			goto unlock_out;
 716	}
 717
 718	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 719
 720	kaddr = kmap_atomic(page, KM_USER0);
 721	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 722		nbytes = PAGE_CACHE_SIZE - offset;
 723	memcpy(kaddr + offset, ptr, nbytes);
 724	flush_dcache_page(page);
 725	kunmap_atomic(kaddr, KM_USER0);
 726	unlock_page(page);
 727	page_cache_release(page);
 728
 729	/* If quota straddles page boundary, we need to update the rest of the
 730	 * quota at the beginning of the next page */
 731	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
 732		ptr = ptr + nbytes;
 733		nbytes = sizeof(struct gfs2_quota) - nbytes;
 734		offset = 0;
 735		index++;
 736		goto get_a_page;
 737	}
 738
 739	/* Update the disk inode timestamp and size (if extended) */
 740	err = gfs2_meta_inode_buffer(ip, &dibh);
 741	if (err)
 742		goto out;
 743
 744	size = loc + sizeof(struct gfs2_quota);
 745	if (size > inode->i_size)
 746		i_size_write(inode, size);
 747	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 748	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 749	gfs2_dinode_out(ip, dibh->b_data);
 750	brelse(dibh);
 751	mark_inode_dirty(inode);
 752
 753out:
 754	return err;
 755unlock_out:
 756	unlock_page(page);
 757	page_cache_release(page);
 758	return err;
 759}
 760
 761static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 
 762{
 763	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 764	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 
 765	unsigned int data_blocks, ind_blocks;
 766	struct gfs2_holder *ghs, i_gh;
 767	unsigned int qx, x;
 768	struct gfs2_quota_data *qd;
 
 769	loff_t offset;
 770	unsigned int nalloc = 0, blocks;
 771	struct gfs2_alloc *al = NULL;
 772	int error;
 773
 774	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 775			      &data_blocks, &ind_blocks);
 776
 777	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 778	if (!ghs)
 779		return -ENOMEM;
 780
 781	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 782	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 783	for (qx = 0; qx < num_qd; qx++) {
 784		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 785					   GL_NOCACHE, &ghs[qx]);
 786		if (error)
 787			goto out;
 788	}
 789
 790	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 791	if (error)
 792		goto out;
 793
 794	for (x = 0; x < num_qd; x++) {
 795		offset = qd2offset(qda[x]);
 796		if (gfs2_write_alloc_required(ip, offset,
 797					      sizeof(struct gfs2_quota)))
 798			nalloc++;
 799	}
 800
 801	al = gfs2_alloc_get(ip);
 802	if (!al) {
 803		error = -ENOMEM;
 804		goto out_gunlock;
 805	}
 806	/* 
 807	 * 1 blk for unstuffing inode if stuffed. We add this extra
 808	 * block to the reservation unconditionally. If the inode
 809	 * doesn't need unstuffing, the block will be released to the 
 810	 * rgrp since it won't be allocated during the transaction
 811	 */
 812	al->al_requested = 1;
 813	/* +3 in the end for unstuffing block, inode size update block
 814	 * and another block in case quota straddles page boundary and 
 815	 * two blocks need to be updated instead of 1 */
 816	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 817
 818	if (nalloc)
 819		al->al_requested += nalloc * (data_blocks + ind_blocks);		
 820	error = gfs2_inplace_reserve(ip);
 821	if (error)
 822		goto out_alloc;
 823
 824	if (nalloc)
 825		blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
 826
 827	error = gfs2_trans_begin(sdp, blocks, 0);
 828	if (error)
 829		goto out_ipres;
 830
 831	for (x = 0; x < num_qd; x++) {
 832		qd = qda[x];
 833		offset = qd2offset(qd);
 834		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 
 835		if (error)
 836			goto out_end_trans;
 837
 838		do_qc(qd, -qd->qd_change_sync);
 839		set_bit(QDF_REFRESH, &qd->qd_flags);
 840	}
 841
 842	error = 0;
 843
 844out_end_trans:
 845	gfs2_trans_end(sdp);
 846out_ipres:
 847	gfs2_inplace_release(ip);
 848out_alloc:
 849	gfs2_alloc_put(ip);
 850out_gunlock:
 851	gfs2_glock_dq_uninit(&i_gh);
 852out:
 853	while (qx--)
 854		gfs2_glock_dq_uninit(&ghs[qx]);
 855	mutex_unlock(&ip->i_inode.i_mutex);
 856	kfree(ghs);
 857	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 858	return error;
 859}
 860
 861static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 862{
 863	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 864	struct gfs2_quota q;
 865	struct gfs2_quota_lvb *qlvb;
 866	loff_t pos;
 867	int error;
 868
 869	memset(&q, 0, sizeof(struct gfs2_quota));
 870	pos = qd2offset(qd);
 871	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
 872	if (error < 0)
 873		return error;
 874
 875	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 876	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 877	qlvb->__pad = 0;
 878	qlvb->qb_limit = q.qu_limit;
 879	qlvb->qb_warn = q.qu_warn;
 880	qlvb->qb_value = q.qu_value;
 
 881	qd->qd_qb = *qlvb;
 
 882
 883	return 0;
 884}
 885
 886static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 887		    struct gfs2_holder *q_gh)
 888{
 889	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 890	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 891	struct gfs2_holder i_gh;
 892	int error;
 893
 
 894restart:
 895	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
 896	if (error)
 897		return error;
 898
 899	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 900
 901	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 902		gfs2_glock_dq_uninit(q_gh);
 903		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 904					   GL_NOCACHE, q_gh);
 905		if (error)
 906			return error;
 907
 908		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
 909		if (error)
 910			goto fail;
 911
 912		error = update_qd(sdp, qd);
 913		if (error)
 914			goto fail_gunlock;
 915
 916		gfs2_glock_dq_uninit(&i_gh);
 917		gfs2_glock_dq_uninit(q_gh);
 918		force_refresh = 0;
 919		goto restart;
 920	}
 921
 922	return 0;
 923
 924fail_gunlock:
 925	gfs2_glock_dq_uninit(&i_gh);
 926fail:
 927	gfs2_glock_dq_uninit(q_gh);
 928	return error;
 929}
 930
 931int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
 932{
 933	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 934	struct gfs2_alloc *al = ip->i_alloc;
 935	struct gfs2_quota_data *qd;
 936	unsigned int x;
 937	int error = 0;
 938
 939	gfs2_quota_hold(ip, uid, gid);
 940
 941	if (capable(CAP_SYS_RESOURCE) ||
 942	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 943		return 0;
 944
 945	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
 946	     sort_qd, NULL);
 947
 948	for (x = 0; x < al->al_qd_num; x++) {
 949		int force = NO_FORCE;
 950		qd = al->al_qd[x];
 951		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 952			force = FORCE;
 953		error = do_glock(qd, force, &al->al_qd_ghs[x]);
 954		if (error)
 955			break;
 956	}
 957
 958	if (!error)
 959		set_bit(GIF_QD_LOCKED, &ip->i_flags);
 960	else {
 961		while (x--)
 962			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
 963		gfs2_quota_unhold(ip);
 964	}
 965
 966	return error;
 967}
 968
 969static int need_sync(struct gfs2_quota_data *qd)
 970{
 971	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 972	struct gfs2_tune *gt = &sdp->sd_tune;
 973	s64 value;
 974	unsigned int num, den;
 975	int do_sync = 1;
 976
 
 977	if (!qd->qd_qb.qb_limit)
 978		return 0;
 979
 980	spin_lock(&qd_lru_lock);
 981	value = qd->qd_change;
 982	spin_unlock(&qd_lru_lock);
 983
 984	spin_lock(&gt->gt_spin);
 985	num = gt->gt_quota_scale_num;
 986	den = gt->gt_quota_scale_den;
 987	spin_unlock(&gt->gt_spin);
 988
 989	if (value < 0)
 990		do_sync = 0;
 991	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
 992		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 993		do_sync = 0;
 994	else {
 995		value *= gfs2_jindex_size(sdp) * num;
 996		value = div_s64(value, den);
 997		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
 998		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 999			do_sync = 0;
1000	}
1001
1002	return do_sync;
1003}
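To make the scaling in need_sync() concrete with illustrative numbers (not taken from the source): in a cluster with four journals, quota_scale of one (num = den = 1), a limit of 1000 blocks and 900 blocks already recorded in the quota file, a local unsynced change of 30 blocks projects to 900 + 30 * 4 = 1020 >= 1000, so the change is written back to the quota file; a local change of only 20 blocks projects to 980 and the sync is deferred until the next quota_quantum interval.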
1004
1005void gfs2_quota_unlock(struct gfs2_inode *ip)
1006{
1007	struct gfs2_alloc *al = ip->i_alloc;
1008	struct gfs2_quota_data *qda[4];
1009	unsigned int count = 0;
1010	unsigned int x;
1011
1012	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1013		goto out;
1014
1015	for (x = 0; x < al->al_qd_num; x++) {
1016		struct gfs2_quota_data *qd;
1017		int sync;
 
1018
1019		qd = al->al_qd[x];
1020		sync = need_sync(qd);
1021
1022		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
1023
1024		if (sync && qd_trylock(qd))
1025			qda[count++] = qd;
1026	}
1027
1028	if (count) {
1029		do_sync(count, qda);
1030		for (x = 0; x < count; x++)
1031			qd_unlock(qda[x]);
1032	}
1033
1034out:
1035	gfs2_quota_unhold(ip);
1036}
1037
1038#define MAX_LINE 256
1039
1040static int print_message(struct gfs2_quota_data *qd, char *type)
1041{
1042	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
1043
1044	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
1045	       sdp->sd_fsname, type,
1046	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1047	       qd->qd_id);
1048
1049	return 0;
1050}
1051
1052int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1053{
1054	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1055	struct gfs2_alloc *al = ip->i_alloc;
1056	struct gfs2_quota_data *qd;
1057	s64 value;
1058	unsigned int x;
1059	int error = 0;
1060
 
1061	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1062		return 0;
1063
1064        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
1065                return 0;
1066
1067	for (x = 0; x < al->al_qd_num; x++) {
1068		qd = al->al_qd[x];
1069
1070		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1071		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
1072			continue;
1073
1074		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1075		spin_lock(&qd_lru_lock);
1076		value += qd->qd_change;
1077		spin_unlock(&qd_lru_lock);
1078
1079		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1080			print_message(qd, "exceeded");
1081			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1082					   USRQUOTA : GRPQUOTA, qd->qd_id,
1083					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1084
1085			error = -EDQUOT;
1086			break;
1087		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1088			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
1089			   time_after_eq(jiffies, qd->qd_last_warn +
1090					 gfs2_tune_get(sdp,
1091						gt_quota_warn_period) * HZ)) {
1092			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1093					   USRQUOTA : GRPQUOTA, qd->qd_id,
1094					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1095			error = print_message(qd, "warning");
 
1096			qd->qd_last_warn = jiffies;
1097		}
1098	}
1099
1100	return error;
1101}
1102
1103void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1104		       u32 uid, u32 gid)
1105{
1106	struct gfs2_alloc *al = ip->i_alloc;
1107	struct gfs2_quota_data *qd;
1108	unsigned int x;
 
1109
1110	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
 
1111		return;
1112	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1113		return;
1114
1115	for (x = 0; x < al->al_qd_num; x++) {
1116		qd = al->al_qd[x];
1117
1118		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1119		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
1120			do_qc(qd, change);
1121		}
1122	}
1123}
1124
1125int gfs2_quota_sync(struct super_block *sb, int type, int wait)
1126{
1127	struct gfs2_sbd *sdp = sb->s_fs_info;
1128	struct gfs2_quota_data **qda;
1129	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1130	unsigned int num_qd;
1131	unsigned int x;
1132	int error = 0;
1133
1134	sdp->sd_quota_sync_gen++;
 
1135
1136	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1137	if (!qda)
1138		return -ENOMEM;
1139
1140	do {
1141		num_qd = 0;
1142
1143		for (;;) {
1144			error = qd_fish(sdp, qda + num_qd);
1145			if (error || !qda[num_qd])
1146				break;
1147			if (++num_qd == max_qd)
1148				break;
1149		}
 
1150
1151		if (num_qd) {
1152			if (!error)
1153				error = do_sync(num_qd, qda);
1154			if (!error)
1155				for (x = 0; x < num_qd; x++)
1156					qda[x]->qd_sync_gen =
1157						sdp->sd_quota_sync_gen;
1158
1159			for (x = 0; x < num_qd; x++)
1160				qd_unlock(qda[x]);
 
1161		}
1162	} while (!error && num_qd == max_qd);
1163
1164	kfree(qda);
1165
1166	return error;
1167}
1168
1169static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
1170{
1171	return gfs2_quota_sync(sb, type, 0);
1172}
1173
1174int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1175{
1176	struct gfs2_quota_data *qd;
1177	struct gfs2_holder q_gh;
1178	int error;
1179
1180	error = qd_get(sdp, user, id, &qd);
1181	if (error)
1182		return error;
1183
1184	error = do_glock(qd, FORCE, &q_gh);
1185	if (!error)
1186		gfs2_glock_dq_uninit(&q_gh);
1187
1188	qd_put(qd);
1189	return error;
1190}
1191
1192static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1193{
1194	const struct gfs2_quota_change *str = buf;
1195
1196	qc->qc_change = be64_to_cpu(str->qc_change);
1197	qc->qc_flags = be32_to_cpu(str->qc_flags);
1198	qc->qc_id = be32_to_cpu(str->qc_id);
1199}
1200
1201int gfs2_quota_init(struct gfs2_sbd *sdp)
1202{
1203	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1204	u64 size = i_size_read(sdp->sd_qc_inode);
1205	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1206	unsigned int x, slot = 0;
1207	unsigned int found = 0;
1208	u64 dblock;
1209	u32 extlen = 0;
1210	int error;
1211
1212	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1213		return -EIO;
1214
1215	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1216	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1217
1218	error = -ENOMEM;
1219
1220	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1221				       sizeof(unsigned char *), GFP_NOFS);
 
1222	if (!sdp->sd_quota_bitmap)
1223		return error;
1224
1225	for (x = 0; x < sdp->sd_quota_chunks; x++) {
1226		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1227		if (!sdp->sd_quota_bitmap[x])
1228			goto fail;
1229	}
1230
1231	for (x = 0; x < blocks; x++) {
1232		struct buffer_head *bh;
1233		unsigned int y;
1234
1235		if (!extlen) {
1236			int new = 0;
1237			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1238			if (error)
1239				goto fail;
1240		}
1241		error = -EIO;
1242		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1243		if (!bh)
1244			goto fail;
1245		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1246			brelse(bh);
1247			goto fail;
1248		}
1249
 
1250		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1251		     y++, slot++) {
1252			struct gfs2_quota_change_host qc;
1253			struct gfs2_quota_data *qd;
1254
1255			gfs2_quota_change_in(&qc, bh->b_data +
1256					  sizeof(struct gfs2_meta_header) +
1257					  y * sizeof(struct gfs2_quota_change));
1258			if (!qc.qc_change)
1259				continue;
1260
1261			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1262					 qc.qc_id, &qd);
1263			if (error) {
1264				brelse(bh);
1265				goto fail;
1266			}
1267
1268			set_bit(QDF_CHANGE, &qd->qd_flags);
1269			qd->qd_change = qc.qc_change;
1270			qd->qd_slot = slot;
1271			qd->qd_slot_count = 1;
1272
1273			spin_lock(&qd_lru_lock);
1274			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1275			list_add(&qd->qd_list, &sdp->sd_quota_list);
1276			atomic_inc(&sdp->sd_quota_count);
1277			spin_unlock(&qd_lru_lock);
1278
1279			found++;
1280		}
1281
1282		brelse(bh);
1283		dblock++;
1284		extlen--;
1285	}
1286
1287	if (found)
1288		fs_info(sdp, "found %u quota changes\n", found);
1289
1290	return 0;
1291
1292fail:
1293	gfs2_quota_cleanup(sdp);
1294	return error;
1295}
1296
1297void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1298{
1299	struct list_head *head = &sdp->sd_quota_list;
1300	struct gfs2_quota_data *qd;
1301	unsigned int x;
 
1302
1303	spin_lock(&qd_lru_lock);
1304	while (!list_empty(head)) {
1305		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1306
1307		if (atomic_read(&qd->qd_count) > 1 ||
1308		    (atomic_read(&qd->qd_count) &&
1309		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1310			list_move(&qd->qd_list, head);
1311			spin_unlock(&qd_lru_lock);
1312			schedule();
1313			spin_lock(&qd_lru_lock);
1314			continue;
1315		}
1316
1317		list_del(&qd->qd_list);
1318		/* Also remove if this qd exists in the reclaim list */
1319		if (!list_empty(&qd->qd_reclaim)) {
1320			list_del_init(&qd->qd_reclaim);
1321			atomic_dec(&qd_lru_count);
 
1322		}
1323		atomic_dec(&sdp->sd_quota_count);
1324		spin_unlock(&qd_lru_lock);
1325
1326		if (!atomic_read(&qd->qd_count)) {
1327			gfs2_assert_warn(sdp, !qd->qd_change);
1328			gfs2_assert_warn(sdp, !qd->qd_slot_count);
1329		} else
1330			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1331		gfs2_assert_warn(sdp, !qd->qd_bh_count);
1332
1333		gfs2_glock_put(qd->qd_gl);
1334		kmem_cache_free(gfs2_quotad_cachep, qd);
1335
1336		spin_lock(&qd_lru_lock);
1337	}
1338	spin_unlock(&qd_lru_lock);
1339
1340	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
1341
1342	if (sdp->sd_quota_bitmap) {
1343		for (x = 0; x < sdp->sd_quota_chunks; x++)
1344			kfree(sdp->sd_quota_bitmap[x]);
1345		kfree(sdp->sd_quota_bitmap);
1346	}
1347}
1348
1349static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1350{
1351	if (error == 0 || error == -EROFS)
1352		return;
1353	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1354		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1355}
1356
1357static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1358			       int (*fxn)(struct super_block *sb, int type),
1359			       unsigned long t, unsigned long *timeo,
1360			       unsigned int *new_timeo)
1361{
1362	if (t >= *timeo) {
1363		int error = fxn(sdp->sd_vfs, 0);
1364		quotad_error(sdp, msg, error);
1365		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1366	} else {
1367		*timeo -= t;
1368	}
1369}
1370
1371static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1372{
1373	struct gfs2_inode *ip;
1374
1375	while(1) {
1376		ip = NULL;
1377		spin_lock(&sdp->sd_trunc_lock);
1378		if (!list_empty(&sdp->sd_trunc_list)) {
1379			ip = list_entry(sdp->sd_trunc_list.next,
1380					struct gfs2_inode, i_trunc_list);
1381			list_del_init(&ip->i_trunc_list);
1382		}
1383		spin_unlock(&sdp->sd_trunc_lock);
1384		if (ip == NULL)
1385			return;
1386		gfs2_glock_finish_truncate(ip);
1387	}
1388}
1389
1390void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1391	if (!sdp->sd_statfs_force_sync) {
1392		sdp->sd_statfs_force_sync = 1;
1393		wake_up(&sdp->sd_quota_wait);
1394	}
1395}
1396
1397
1398/**
1399 * gfs2_quotad - Write cached quota changes into the quota file
1400 * @sdp: Pointer to GFS2 superblock
1401 *
1402 */
1403
1404int gfs2_quotad(void *data)
1405{
1406	struct gfs2_sbd *sdp = data;
1407	struct gfs2_tune *tune = &sdp->sd_tune;
1408	unsigned long statfs_timeo = 0;
1409	unsigned long quotad_timeo = 0;
1410	unsigned long t = 0;
1411	DEFINE_WAIT(wait);
1412	int empty;
1413
 
1414	while (!kthread_should_stop()) {
1415
1416		/* Update the master statfs file */
1417		if (sdp->sd_statfs_force_sync) {
1418			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1419			quotad_error(sdp, "statfs", error);
1420			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1421		}
1422		else
1423			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1424				   	   &statfs_timeo,
1425					   &tune->gt_statfs_quantum);
1426
1427		/* Update quota file */
1428		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
1429				   &quotad_timeo, &tune->gt_quota_quantum);
1430
1431		/* Check for & recover partially truncated inodes */
1432		quotad_check_trunc_list(sdp);
1433
1434		if (freezing(current))
1435			refrigerator();
1436		t = min(quotad_timeo, statfs_timeo);
1437
1438		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1439		spin_lock(&sdp->sd_trunc_lock);
1440		empty = list_empty(&sdp->sd_trunc_list);
1441		spin_unlock(&sdp->sd_trunc_lock);
1442		if (empty && !sdp->sd_statfs_force_sync)
1443			t -= schedule_timeout(t);
1444		else
1445			t = 0;
1446		finish_wait(&sdp->sd_quota_wait, &wait);
1447	}
1448
1449	return 0;
1450}
1451
1452static int gfs2_quota_get_xstate(struct super_block *sb,
1453				 struct fs_quota_stat *fqs)
1454{
1455	struct gfs2_sbd *sdp = sb->s_fs_info;
1456
1457	memset(fqs, 0, sizeof(struct fs_quota_stat));
1458	fqs->qs_version = FS_QSTAT_VERSION;
1459
1460	switch (sdp->sd_args.ar_quota) {
1461	case GFS2_QUOTA_ON:
1462		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
1463		/*FALLTHRU*/
 
1464	case GFS2_QUOTA_ACCOUNT:
1465		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
1466		break;
1467	case GFS2_QUOTA_OFF:
1468		break;
1469	}
1470
1471	if (sdp->sd_quota_inode) {
1472		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1473		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1474	}
1475	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
1476	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1477	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
 
1478	return 0;
1479}
1480
1481static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1482			  struct fs_disk_quota *fdq)
1483{
1484	struct gfs2_sbd *sdp = sb->s_fs_info;
1485	struct gfs2_quota_lvb *qlvb;
1486	struct gfs2_quota_data *qd;
1487	struct gfs2_holder q_gh;
1488	int error;
1489
1490	memset(fdq, 0, sizeof(struct fs_disk_quota));
1491
1492	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1493		return -ESRCH; /* Crazy XFS error code */
1494
1495	if (type == USRQUOTA)
1496		type = QUOTA_USER;
1497	else if (type == GRPQUOTA)
1498		type = QUOTA_GROUP;
1499	else
1500		return -EINVAL;
1501
1502	error = qd_get(sdp, type, id, &qd);
1503	if (error)
1504		return error;
1505	error = do_glock(qd, FORCE, &q_gh);
1506	if (error)
1507		goto out;
1508
1509	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1510	fdq->d_version = FS_DQUOT_VERSION;
1511	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1512	fdq->d_id = id;
1513	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1514	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1515	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1516
1517	gfs2_glock_dq_uninit(&q_gh);
1518out:
1519	qd_put(qd);
1520	return error;
1521}
1522
1523/* GFS2 only supports a subset of the XFS fields */
1524#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1525
1526static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1527			  struct fs_disk_quota *fdq)
1528{
1529	struct gfs2_sbd *sdp = sb->s_fs_info;
1530	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1531	struct gfs2_quota_data *qd;
1532	struct gfs2_holder q_gh, i_gh;
1533	unsigned int data_blocks, ind_blocks;
1534	unsigned int blocks = 0;
1535	int alloc_required;
1536	struct gfs2_alloc *al;
1537	loff_t offset;
1538	int error;
1539
1540	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1541		return -ESRCH; /* Crazy XFS error code */
1542
1543	switch(type) {
1544	case USRQUOTA:
1545		type = QUOTA_USER;
1546		if (fdq->d_flags != FS_USER_QUOTA)
1547			return -EINVAL;
1548		break;
1549	case GRPQUOTA:
1550		type = QUOTA_GROUP;
1551		if (fdq->d_flags != FS_GROUP_QUOTA)
1552			return -EINVAL;
1553		break;
1554	default:
1555		return -EINVAL;
1556	}
1557
1558	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1559		return -EINVAL;
1560	if (fdq->d_id != id)
1561		return -EINVAL;
1562
1563	error = qd_get(sdp, type, id, &qd);
1564	if (error)
1565		return error;
1566
1567	mutex_lock(&ip->i_inode.i_mutex);
1568	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1569	if (error)
1570		goto out_put;
1571	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1572	if (error)
1573		goto out_q;
1574
1575	/* Check for existing entry, if none then alloc new blocks */
1576	error = update_qd(sdp, qd);
1577	if (error)
1578		goto out_i;
1579
1580	/* If nothing has changed, this is a no-op */
1581	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1582	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1583		fdq->d_fieldmask ^= FS_DQ_BSOFT;
1584
1585	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1586	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1587		fdq->d_fieldmask ^= FS_DQ_BHARD;
1588
1589	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1590	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1591		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1592
1593	if (fdq->d_fieldmask == 0)
1594		goto out_i;
1595
1596	offset = qd2offset(qd);
1597	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1598	if (gfs2_is_stuffed(ip))
1599		alloc_required = 1;
1600	if (alloc_required) {
1601		al = gfs2_alloc_get(ip);
1602		if (al == NULL)
1603			goto out_i;
1604		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1605				       &data_blocks, &ind_blocks);
1606		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
1607		error = gfs2_inplace_reserve(ip);
 
1608		if (error)
1609			goto out_alloc;
1610		blocks += gfs2_rg_blocks(al);
1611	}
1612
1613	/* Some quotas span block boundaries and can update two blocks,
1614	   adding an extra block to the transaction to handle such quotas */
1615	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1616	if (error)
1617		goto out_release;
1618
1619	/* Apply changes */
1620	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1621
1622	gfs2_trans_end(sdp);
1623out_release:
1624	if (alloc_required) {
1625		gfs2_inplace_release(ip);
1626out_alloc:
1627		gfs2_alloc_put(ip);
1628	}
1629out_i:
1630	gfs2_glock_dq_uninit(&i_gh);
1631out_q:
1632	gfs2_glock_dq_uninit(&q_gh);
1633out_put:
1634	mutex_unlock(&ip->i_inode.i_mutex);
1635	qd_put(qd);
1636	return error;
1637}
1638
1639const struct quotactl_ops gfs2_quotactl_ops = {
1640	.quota_sync     = gfs2_quota_sync,
1641	.get_xstate     = gfs2_quota_get_xstate,
1642	.get_dqblk	= gfs2_get_dqblk,
1643	.set_dqblk	= gfs2_set_dqblk,
1644};
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7/*
   8 * Quota change tags are associated with each transaction that allocates or
   9 * deallocates space.  Those changes are accumulated locally to each node (in a
  10 * per-node file) and then are periodically synced to the quota file.  This
  11 * avoids the bottleneck of constantly touching the quota file, but introduces
  12 * fuzziness in the current usage value of IDs that are being used on different
  13 * nodes in the cluster simultaneously.  So, it is possible for a user on
  14 * multiple nodes to overrun their quota, but that overrun is controllable.
  15 * Since quota tags are part of transactions, there is no need for a quota check
  16 * program to be run on node crashes or anything like that.
  17 *
  18 * There are a couple of knobs that let the administrator manage the quota
  19 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
  20 * sitting on one node before being synced to the quota file.  (The default is
  21 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
  22 * of quota file syncs increases as the user moves closer to their limit.  The
  23 * more frequent the syncs, the more accurate the quota enforcement, but that
  24 * means that there is more contention between the nodes for the quota file.
  25 * The default value is one.  This sets the maximum theoretical quota overrun
  26 * (with an infinite number of nodes with infinite bandwidth) to twice the user's limit.  (In
  27 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
  28 * number greater than one makes quota syncs more frequent and reduces the
  29 * maximum overrun.  Numbers less than one (but greater than zero) make quota
  30 * syncs less frequent.
  31 *
  32 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  33 * the quota file, so it is not being constantly read.
  34 */
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
 
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
  85static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  86				 const struct kqid qid)
  87{
  88	unsigned int h;
  89
  90	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  91	h = jhash(&qid, sizeof(struct kqid), h);
  92
  93	return h & GFS2_QD_HASH_MASK;
  94}
  95
  96static inline void spin_lock_bucket(unsigned int hash)
  97{
  98        hlist_bl_lock(&qd_hash_table[hash]);
  99}
 100
 101static inline void spin_unlock_bucket(unsigned int hash)
 102{
 103        hlist_bl_unlock(&qd_hash_table[hash]);
 104}
 105
 106static void gfs2_qd_dealloc(struct rcu_head *rcu)
 107{
 108	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
 109	struct gfs2_sbd *sdp = qd->qd_sbd;
 110
 111	kmem_cache_free(gfs2_quotad_cachep, qd);
 112	if (atomic_dec_and_test(&sdp->sd_quota_count))
 113		wake_up(&sdp->sd_kill_wait);
 114}
 115
 116static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
 117{
 118	struct gfs2_sbd *sdp = qd->qd_sbd;
 119
 120	spin_lock(&qd_lock);
 121	list_del(&qd->qd_list);
 122	spin_unlock(&qd_lock);
 123
 124	spin_lock_bucket(qd->qd_hash);
 125	hlist_bl_del_rcu(&qd->qd_hlist);
 126	spin_unlock_bucket(qd->qd_hash);
 127
 128	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
 129		gfs2_assert_warn(sdp, !qd->qd_change);
 130		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
 131		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 132	}
 133
 134	gfs2_glock_put(qd->qd_gl);
 135	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
 136}
 137
 138static void gfs2_qd_list_dispose(struct list_head *list)
 139{
 140	struct gfs2_quota_data *qd;
 141
 142	while (!list_empty(list)) {
 143		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
 144		list_del(&qd->qd_lru);
 145
 146		gfs2_qd_dispose(qd);
 147	}
 148}
 149
 150
 151static enum lru_status gfs2_qd_isolate(struct list_head *item,
 152		struct list_lru_one *lru, void *arg)
 153{
 154	struct list_head *dispose = arg;
 155	struct gfs2_quota_data *qd =
 156		list_entry(item, struct gfs2_quota_data, qd_lru);
 157	enum lru_status status;
 158
 159	if (!spin_trylock(&qd->qd_lockref.lock))
 160		return LRU_SKIP;
 161
 162	status = LRU_SKIP;
 163	if (qd->qd_lockref.count == 0) {
 164		lockref_mark_dead(&qd->qd_lockref);
 165		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
 166		status = LRU_REMOVED;
 167	}
 168
 169	spin_unlock(&qd->qd_lockref.lock);
 170	return status;
 171}
 172
 173static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 174					 struct shrink_control *sc)
 175{
 176	LIST_HEAD(dispose);
 177	unsigned long freed;
 178
 179	if (!(sc->gfp_mask & __GFP_FS))
 180		return SHRINK_STOP;
 181
 182	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 183				     gfs2_qd_isolate, &dispose);
 184
 185	gfs2_qd_list_dispose(&dispose);
 186
 187	return freed;
 188}
 189
 190static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
 191					  struct shrink_control *sc)
 192{
 193	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 194}
 195
 196static struct shrinker *gfs2_qd_shrinker;
 197
 198int __init gfs2_qd_shrinker_init(void)
 199{
 200	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
 201	if (!gfs2_qd_shrinker)
 202		return -ENOMEM;
 203
 204	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
 205	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
 206
 207	shrinker_register(gfs2_qd_shrinker);
 208
 209	return 0;
 210}
 211
 212void gfs2_qd_shrinker_exit(void)
 213{
 214	shrinker_free(gfs2_qd_shrinker);
 215}
 216
 217static u64 qd2index(struct gfs2_quota_data *qd)
 218{
 219	struct kqid qid = qd->qd_id;
 220	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
 221		((qid.type == USRQUOTA) ? 0 : 1);
 222}
 223
 224static u64 qd2offset(struct gfs2_quota_data *qd)
 225{
 226	return qd2index(qd) * sizeof(struct gfs2_quota);
 227}
 228
 229static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
 230{
 231	struct gfs2_quota_data *qd;
 232	int error;
 233
 234	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
 235	if (!qd)
 236		return NULL;
 237
 238	qd->qd_sbd = sdp;
 239	qd->qd_lockref.count = 0;
 240	spin_lock_init(&qd->qd_lockref.lock);
 241	qd->qd_id = qid;
 242	qd->qd_slot = -1;
 243	INIT_LIST_HEAD(&qd->qd_lru);
 244	qd->qd_hash = hash;
 245
 246	error = gfs2_glock_get(sdp, qd2index(qd),
 247			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
 248	if (error)
 249		goto fail;
 250
 251	return qd;
 252
 253fail:
 254	kmem_cache_free(gfs2_quotad_cachep, qd);
 255	return NULL;
 256}
 257
 258static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
 259						     const struct gfs2_sbd *sdp,
 260						     struct kqid qid)
 261{
 262	struct gfs2_quota_data *qd;
 263	struct hlist_bl_node *h;
 264
 265	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
 266		if (!qid_eq(qd->qd_id, qid))
 267			continue;
 268		if (qd->qd_sbd != sdp)
 269			continue;
 270		if (lockref_get_not_dead(&qd->qd_lockref)) {
 271			list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
 272			return qd;
 273		}
 274	}
 275
 276	return NULL;
 277}
 278
 279
 280static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 281		  struct gfs2_quota_data **qdp)
 282{
 283	struct gfs2_quota_data *qd, *new_qd;
 284	unsigned int hash = gfs2_qd_hash(sdp, qid);
 285
 286	rcu_read_lock();
 287	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 288	rcu_read_unlock();
 289
 290	if (qd)
 291		return 0;
 292
 293	new_qd = qd_alloc(hash, sdp, qid);
 294	if (!new_qd)
 295		return -ENOMEM;
 296
 297	spin_lock(&qd_lock);
 298	spin_lock_bucket(hash);
 299	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 300	if (qd == NULL) {
 301		new_qd->qd_lockref.count++;
 302		*qdp = new_qd;
 303		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 304		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
 305		atomic_inc(&sdp->sd_quota_count);
 306	}
 307	spin_unlock_bucket(hash);
 308	spin_unlock(&qd_lock);
 309
 310	if (qd) {
 311		gfs2_glock_put(new_qd->qd_gl);
 312		kmem_cache_free(gfs2_quotad_cachep, new_qd);
 313	}
 314
 315	return 0;
 316}
 317
 318
 319static void __qd_hold(struct gfs2_quota_data *qd)
 320{
 321	struct gfs2_sbd *sdp = qd->qd_sbd;
 322	gfs2_assert(sdp, qd->qd_lockref.count > 0);
 323	qd->qd_lockref.count++;
 324}
 325
 326static void qd_put(struct gfs2_quota_data *qd)
 327{
 328	struct gfs2_sbd *sdp;
 329
 330	if (lockref_put_or_lock(&qd->qd_lockref))
 331		return;
 
 
 
 332
 333	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
 334	sdp = qd->qd_sbd;
 335	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
 336		lockref_mark_dead(&qd->qd_lockref);
 337		spin_unlock(&qd->qd_lockref.lock);
 338
 339		gfs2_qd_dispose(qd);
 340		return;
 
 341	}
 342
 343	qd->qd_lockref.count = 0;
 344	list_lru_add_obj(&gfs2_qd_lru, &qd->qd_lru);
 345	spin_unlock(&qd->qd_lockref.lock);
 346}
 347
 348static int slot_get(struct gfs2_quota_data *qd)
 349{
 350	struct gfs2_sbd *sdp = qd->qd_sbd;
 351	unsigned int bit;
 352	int error = 0;
 353
 354	spin_lock(&sdp->sd_bitmap_lock);
 355	if (qd->qd_slot_ref == 0) {
 356		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
 357					  sdp->sd_quota_slots);
 358		if (bit >= sdp->sd_quota_slots) {
 359			error = -ENOSPC;
 360			goto out;
 361		}
 362		set_bit(bit, sdp->sd_quota_bitmap);
 363		qd->qd_slot = bit;
 364	}
 365	qd->qd_slot_ref++;
 366out:
 367	spin_unlock(&sdp->sd_bitmap_lock);
 368	return error;
 369}
 370
 371static void slot_hold(struct gfs2_quota_data *qd)
 372{
 373	struct gfs2_sbd *sdp = qd->qd_sbd;
 374
 375	spin_lock(&sdp->sd_bitmap_lock);
 376	gfs2_assert(sdp, qd->qd_slot_ref);
 377	qd->qd_slot_ref++;
 378	spin_unlock(&sdp->sd_bitmap_lock);
 379}
 380
 381static void slot_put(struct gfs2_quota_data *qd)
 382{
 383	struct gfs2_sbd *sdp = qd->qd_sbd;
 384
 385	spin_lock(&sdp->sd_bitmap_lock);
 386	gfs2_assert(sdp, qd->qd_slot_ref);
 387	if (!--qd->qd_slot_ref) {
 388		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 389		qd->qd_slot = -1;
 390	}
 391	spin_unlock(&sdp->sd_bitmap_lock);
 392}
 393
 394static int bh_get(struct gfs2_quota_data *qd)
 395{
 396	struct gfs2_sbd *sdp = qd->qd_sbd;
 397	struct inode *inode = sdp->sd_qc_inode;
 398	struct gfs2_inode *ip = GFS2_I(inode);
 399	unsigned int block, offset;
 400	struct buffer_head *bh = NULL;
 401	struct iomap iomap = { };
 402	int error;
 
 
 
 403
 404	spin_lock(&qd->qd_lockref.lock);
 405	if (qd->qd_bh_count) {
 406		qd->qd_bh_count++;
 407		spin_unlock(&qd->qd_lockref.lock);
 408		return 0;
 409	}
 410	spin_unlock(&qd->qd_lockref.lock);
 411
 412	block = qd->qd_slot / sdp->sd_qc_per_block;
 413	offset = qd->qd_slot % sdp->sd_qc_per_block;
 414
 415	error = gfs2_iomap_get(inode,
 416			       (loff_t)block << inode->i_blkbits,
 417			       i_blocksize(inode), &iomap);
 418	if (error)
 419		return error;
 420	error = -ENOENT;
 421	if (iomap.type != IOMAP_MAPPED)
 422		return error;
 423
 424	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
 425			       DIO_WAIT, 0, &bh);
 426	if (error)
 427		return error;
 428	error = -EIO;
 429	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
 430		goto out;
 431
 432	spin_lock(&qd->qd_lockref.lock);
 433	if (qd->qd_bh == NULL) {
 434		qd->qd_bh = bh;
 435		qd->qd_bh_qc = (struct gfs2_quota_change *)
 436			(bh->b_data + sizeof(struct gfs2_meta_header) +
 437			 offset * sizeof(struct gfs2_quota_change));
 438		bh = NULL;
 439	}
 440	qd->qd_bh_count++;
 441	spin_unlock(&qd->qd_lockref.lock);
 442	error = 0;
 443
 444out:
 445	brelse(bh);
 
 
 
 446	return error;
 447}
 448
 449static void bh_put(struct gfs2_quota_data *qd)
 450{
 451	struct gfs2_sbd *sdp = qd->qd_sbd;
 452	struct buffer_head *bh = NULL;
 453
 454	spin_lock(&qd->qd_lockref.lock);
 455	gfs2_assert(sdp, qd->qd_bh_count);
 456	if (!--qd->qd_bh_count) {
 457		bh = qd->qd_bh;
 458		qd->qd_bh = NULL;
 459		qd->qd_bh_qc = NULL;
 460	}
 461	spin_unlock(&qd->qd_lockref.lock);
 462	brelse(bh);
 463}
 464
 465static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 466			 u64 sync_gen)
 467{
 468	bool ret = false;
 469
 470	spin_lock(&qd->qd_lockref.lock);
 471	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 472	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 473	    qd->qd_sync_gen >= sync_gen)
 474		goto out;
 
 475
 476	if (__lockref_is_dead(&qd->qd_lockref))
 477		goto out;
 478	qd->qd_lockref.count++;
 479
 480	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 481	set_bit(QDF_LOCKED, &qd->qd_flags);
 
 
 482	qd->qd_change_sync = qd->qd_change;
 483	slot_hold(qd);
 484	ret = true;
 485
 486out:
 487	spin_unlock(&qd->qd_lockref.lock);
 488	return ret;
 489}
 490
 491static void qd_ungrab_sync(struct gfs2_quota_data *qd)
 492{
 
 
 493	clear_bit(QDF_LOCKED, &qd->qd_flags);
 494	slot_put(qd);
 495	qd_put(qd);
 496}
 497
 498static void qdsb_put(struct gfs2_quota_data *qd)
 499{
 500	bh_put(qd);
 501	slot_put(qd);
 502	qd_put(qd);
 503}
 504
 505static void qd_unlock(struct gfs2_quota_data *qd)
 506{
 507	spin_lock(&qd->qd_lockref.lock);
 508	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
 509	clear_bit(QDF_LOCKED, &qd->qd_flags);
 510	spin_unlock(&qd->qd_lockref.lock);
 511	qdsb_put(qd);
 512}
 513
 514static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 515		    struct gfs2_quota_data **qdp)
 516{
 517	int error;
 518
 519	error = qd_get(sdp, qid, qdp);
 520	if (error)
 521		return error;
 522
 523	error = slot_get(*qdp);
 524	if (error)
 525		goto fail;
 526
 527	error = bh_get(*qdp);
 528	if (error)
 529		goto fail_slot;
 530
 531	return 0;
 532
 533fail_slot:
 534	slot_put(*qdp);
 535fail:
 536	qd_put(*qdp);
 537	return error;
 538}
 539
 540/**
 541 * gfs2_qa_get - make sure we have a quota allocation data structure,
 542 *               if necessary
 543 * @ip: the inode for this reservation
 544 */
 545int gfs2_qa_get(struct gfs2_inode *ip)
 546{
 547	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 548	struct inode *inode = &ip->i_inode;
 549
 550	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 551		return 0;
 552
 553	spin_lock(&inode->i_lock);
 554	if (ip->i_qadata == NULL) {
 555		struct gfs2_qadata *tmp;
 556
 557		spin_unlock(&inode->i_lock);
 558		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
 559		if (!tmp)
 560			return -ENOMEM;
 561
 562		spin_lock(&inode->i_lock);
 563		if (ip->i_qadata == NULL)
 564			ip->i_qadata = tmp;
 565		else
 566			kmem_cache_free(gfs2_qadata_cachep, tmp);
 567	}
 568	ip->i_qadata->qa_ref++;
 569	spin_unlock(&inode->i_lock);
 570	return 0;
 571}
 572
 573void gfs2_qa_put(struct gfs2_inode *ip)
 574{
 575	struct inode *inode = &ip->i_inode;
 576
 577	spin_lock(&inode->i_lock);
 578	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 579		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 580		ip->i_qadata = NULL;
 581	}
 582	spin_unlock(&inode->i_lock);
 583}
 584
 585int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 586{
 587	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 588	struct gfs2_quota_data **qd;
 
 589	int error;
 590
 591	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 592		return 0;
 593
 594	error = gfs2_qa_get(ip);
 595	if (error)
 596		return error;
 597
 598	qd = ip->i_qadata->qa_qd;
 599
 600	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 601	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 602		error = -EIO;
 603		gfs2_qa_put(ip);
 604		goto out;
 605	}
 606
 607	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
 608	if (error)
 609		goto out_unhold;
 610	ip->i_qadata->qa_qd_num++;
 611	qd++;
 612
 613	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
 614	if (error)
 615		goto out_unhold;
 616	ip->i_qadata->qa_qd_num++;
 617	qd++;
 618
 619	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
 620	    !uid_eq(uid, ip->i_inode.i_uid)) {
 621		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
 622		if (error)
 623			goto out_unhold;
 624		ip->i_qadata->qa_qd_num++;
 625		qd++;
 626	}
 627
 628	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
 629	    !gid_eq(gid, ip->i_inode.i_gid)) {
 630		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
 631		if (error)
 632			goto out_unhold;
 633		ip->i_qadata->qa_qd_num++;
 634		qd++;
 635	}
 636
 637out_unhold:
 638	if (error)
 639		gfs2_quota_unhold(ip);
 640out:
 641	return error;
 642}
 643
 644void gfs2_quota_unhold(struct gfs2_inode *ip)
 645{
 646	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 647	u32 x;
 648
 649	if (ip->i_qadata == NULL)
 650		return;
 651
 652	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
 653
 654	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
 655		qdsb_put(ip->i_qadata->qa_qd[x]);
 656		ip->i_qadata->qa_qd[x] = NULL;
 657	}
 658	ip->i_qadata->qa_qd_num = 0;
 659	gfs2_qa_put(ip);
 660}
 661
 662static int sort_qd(const void *a, const void *b)
 663{
 664	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
 665	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
 666
 667	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
 668		return -1;
 669	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
 670		return 1;
 
 671	return 0;
 672}
 673
 674static void do_qc(struct gfs2_quota_data *qd, s64 change)
 675{
 676	struct gfs2_sbd *sdp = qd->qd_sbd;
 677	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 678	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 679	bool needs_put = false;
 680	s64 x;
 681
 682	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
 
 683
 684	/*
 685	 * The QDF_CHANGE flag indicates that the slot in the quota change file
 686	 * is used.  Here, we use the value of qc->qc_change when the slot is
 687	 * used, and we assume a value of 0 otherwise.
 688	 */
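	/*
	 * Illustrative example: a do_qc(qd, +5) against an unused slot sets
	 * QDF_CHANGE, takes a qd and slot reference, and writes qc_change = 5;
	 * a later do_qc(qd, -5) brings qc_change back to 0, clears QDF_CHANGE
	 * and drops those references again.
	 */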
 
 
 689
 690	spin_lock(&qd->qd_lockref.lock);
 
 691
 692	x = 0;
 693	if (test_bit(QDF_CHANGE, &qd->qd_flags))
 694		x = be64_to_cpu(qc->qc_change);
 695	x += change;
 696	qd->qd_change += change;
 697
 698	if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
 699		/* The slot in the quota change file becomes unused. */
 700		clear_bit(QDF_CHANGE, &qd->qd_flags);
 701		qc->qc_flags = 0;
 702		qc->qc_id = 0;
 703		needs_put = true;
 704	} else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 705		/* The slot in the quota change file becomes used. */
 706		set_bit(QDF_CHANGE, &qd->qd_flags);
 707		__qd_hold(qd);
 708		slot_hold(qd);
 709
 710		qc->qc_flags = 0;
 711		if (qd->qd_id.type == USRQUOTA)
 712			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
 713		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
 714	}
 715	qc->qc_change = cpu_to_be64(x);
 716
 717	spin_unlock(&qd->qd_lockref.lock);
 718
 719	if (needs_put) {
 720		slot_put(qd);
 721		qd_put(qd);
 
 
 
 722	}
 723	if (change < 0) /* Reset quiet flag if we freed some blocks */
 724		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 725}
 726
 727static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
 728				  unsigned off, void *buf, unsigned bytes)
 729{
 730	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 731	struct inode *inode = &ip->i_inode;
 732	struct address_space *mapping = inode->i_mapping;
 733	struct folio *folio;
 734	struct buffer_head *bh;
 735	u64 blk;
 736	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 737	unsigned to_write = bytes, pg_off = off;
 738
 739	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 740	boff = off % bsize;
 741
 742	folio = filemap_grab_folio(mapping, index);
 743	if (IS_ERR(folio))
 744		return PTR_ERR(folio);
 745	bh = folio_buffers(folio);
 746	if (!bh)
 747		bh = create_empty_buffers(folio, bsize, 0);
 748
 749	for (;;) {
 750		/* Find the beginning block within the folio */
 751		if (pg_off >= ((bnum * bsize) + bsize)) {
 752			bh = bh->b_this_page;
 753			bnum++;
 754			blk++;
 755			continue;
 756		}
 757		if (!buffer_mapped(bh)) {
 758			gfs2_block_map(inode, blk, bh, 1);
 759			if (!buffer_mapped(bh))
 760				goto unlock_out;
 761			/* If it's a newly allocated disk block, zero it */
 762			if (buffer_new(bh))
 763				folio_zero_range(folio, bnum * bsize,
 764						bh->b_size);
 765		}
 766		if (folio_test_uptodate(folio))
 767			set_buffer_uptodate(bh);
 768		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 769			goto unlock_out;
 770		gfs2_trans_add_data(ip->i_gl, bh);
 771
 772		/* If we need to write to the next block as well */
 773		if (to_write > (bsize - boff)) {
 774			pg_off += (bsize - boff);
 775			to_write -= (bsize - boff);
 776			boff = pg_off % bsize;
 777			continue;
 778		}
 779		break;
 780	}
 781
 782	/* Write to the folio, now that we have set up the buffer(s) */
 783	memcpy_to_folio(folio, off, buf, bytes);
 784	flush_dcache_folio(folio);
 785	folio_unlock(folio);
 786	folio_put(folio);
 787
 788	return 0;
 789
 790unlock_out:
 791	folio_unlock(folio);
 792	folio_put(folio);
 793	return -EIO;
 794}
 795
 796static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
 797				 loff_t loc)
 798{
 799	unsigned long pg_beg;
 800	unsigned pg_off, nbytes, overflow = 0;
 801	int error;
 802	void *ptr;
 803
 804	nbytes = sizeof(struct gfs2_quota);
 805
 806	pg_beg = loc >> PAGE_SHIFT;
 807	pg_off = offset_in_page(loc);
 808
 809	/* If the quota straddles a page boundary, split the write in two */
 810	if ((pg_off + nbytes) > PAGE_SIZE)
 811		overflow = (pg_off + nbytes) - PAGE_SIZE;
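	/*
	 * Illustrative numbers, assuming 4 KiB pages and an 88-byte on-disk
	 * quota record: a record starting at page offset 4040 overflows by
	 * 4040 + 88 - 4096 = 32 bytes, so 56 bytes are written to this page
	 * and the remaining 32 bytes to the start of the next page below.
	 */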
 812
 813	ptr = qp;
 814	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
 815				       nbytes - overflow);
 816	/* If there's an overflow, write the remaining bytes to the next page */
 817	if (!error && overflow)
 818		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
 819					       ptr + nbytes - overflow,
 820					       overflow);
 821	return error;
 822}
 823
 824/**
 825 * gfs2_adjust_quota - adjust record of current block usage
 826 * @sdp: The superblock
 827 * @loc: Offset of the entry in the quota file
 828 * @change: The amount of usage change to record
 829 * @qd: The quota data
 830 * @fdq: The updated limits to record
 831 *
 832 * This function was mostly borrowed from gfs2_block_truncate_page which was
 833 * in turn mostly borrowed from ext3
 834 *
 835 * Returns: 0 or -ve on error
 836 */
 837
 838static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 839			     s64 change, struct gfs2_quota_data *qd,
 840			     struct qc_dqblk *fdq)
 841{
 842	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 843	struct inode *inode = &ip->i_inode;
 844	struct gfs2_quota q;
 845	int err;
 846	u64 size;
 847
 848	if (gfs2_is_stuffed(ip)) {
 849		err = gfs2_unstuff_dinode(ip);
 850		if (err)
 851			return err;
 852	}
 853
 854	memset(&q, 0, sizeof(struct gfs2_quota));
 855	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 856	if (err < 0)
 857		return err;
 858
 859	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 860	be64_add_cpu(&q.qu_value, change);
 861	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 862		q.qu_value = 0; /* Never go negative on quota usage */
 863	spin_lock(&qd->qd_lockref.lock);
 864	qd->qd_qb.qb_value = q.qu_value;
 865	if (fdq) {
 866		if (fdq->d_fieldmask & QC_SPC_SOFT) {
 867			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 868			qd->qd_qb.qb_warn = q.qu_warn;
 869		}
 870		if (fdq->d_fieldmask & QC_SPC_HARD) {
 871			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 872			qd->qd_qb.qb_limit = q.qu_limit;
 873		}
 874		if (fdq->d_fieldmask & QC_SPACE) {
 875			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 876			qd->qd_qb.qb_value = q.qu_value;
 877		}
 878	}
 879	spin_unlock(&qd->qd_lockref.lock);
 880
 881	err = gfs2_write_disk_quota(sdp, &q, loc);
 882	if (!err) {
 883		size = loc + sizeof(struct gfs2_quota);
 884		if (size > inode->i_size)
 885			i_size_write(inode, size);
 886		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 887		mark_inode_dirty(inode);
 888		set_bit(QDF_REFRESH, &qd->qd_flags);
 889	}
 890
 891	return err;
 892}
 893
 894static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda,
 895		   u64 sync_gen)
 896{
 897	struct gfs2_sbd *sdp = (*qda)->qd_sbd;
 898	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 899	struct gfs2_alloc_parms ap = {};
 900	unsigned int data_blocks, ind_blocks;
 901	struct gfs2_holder *ghs, i_gh;
 902	unsigned int qx, x;
 903	struct gfs2_quota_data *qd;
 904	unsigned reserved;
 905	loff_t offset;
 906	unsigned int nalloc = 0, blocks;
 
 907	int error;
 908
 909	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 910			      &data_blocks, &ind_blocks);
 911
 912	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 913	if (!ghs)
 914		return -ENOMEM;
 915
 916	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 917	inode_lock(&ip->i_inode);
 918	for (qx = 0; qx < num_qd; qx++) {
 919		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 920					   GL_NOCACHE, &ghs[qx]);
 921		if (error)
 922			goto out_dq;
 923	}
 924
 925	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 926	if (error)
 927		goto out_dq;
 928
 929	for (x = 0; x < num_qd; x++) {
 930		offset = qd2offset(qda[x]);
 931		if (gfs2_write_alloc_required(ip, offset,
 932					      sizeof(struct gfs2_quota)))
 933			nalloc++;
 934	}
 935
 936	/*
 937	 * One block for unstuffing the inode if it is stuffed.  We add this
 938	 * extra block to the reservation unconditionally; if the inode does
 939	 * not need unstuffing, the block is released back to the rgrp since
 940	 * it won't be allocated during the transaction.
 941	 */
 942	/* The +3 at the end covers the unstuffing block, the inode size update
 943	 * block, and one more block in case the quota straddles a page boundary
 944	 * and two blocks need to be updated instead of one. */
 945	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
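	/*
	 * Illustrative example (data_blocks comes from gfs2_write_calc_reserv()
	 * above; RES_DINODE is assumed to be 1): syncing two IDs with
	 * data_blocks == 1 gives blocks = 2 * 1 + 1 + 2 + 3 = 8 journal blocks
	 * before any allocation-related additions below.
	 */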
 946
 947	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 948	ap.target = reserved;
 949	error = gfs2_inplace_reserve(ip, &ap);
 950	if (error)
 951		goto out_alloc;
 952
 953	if (nalloc)
 954		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 955
 956	error = gfs2_trans_begin(sdp, blocks, 0);
 957	if (error)
 958		goto out_ipres;
 959
 960	for (x = 0; x < num_qd; x++) {
 961		qd = qda[x];
 962		offset = qd2offset(qd);
 963		error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
 964							NULL);
 965		if (error)
 966			goto out_end_trans;
 967
 968		do_qc(qd, -qd->qd_change_sync);
 969		set_bit(QDF_REFRESH, &qd->qd_flags);
 970	}
 971
 
 
 972out_end_trans:
 973	gfs2_trans_end(sdp);
 974out_ipres:
 975	gfs2_inplace_release(ip);
 976out_alloc:
 
 
 977	gfs2_glock_dq_uninit(&i_gh);
 978out_dq:
 979	while (qx--)
 980		gfs2_glock_dq_uninit(&ghs[qx]);
 981	inode_unlock(&ip->i_inode);
 982	kfree(ghs);
 983	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
 984		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
 985	if (!error) {
 986		for (x = 0; x < num_qd; x++) {
 987			qd = qda[x];
 988			spin_lock(&qd->qd_lockref.lock);
 989			if (qd->qd_sync_gen < sync_gen)
 990				qd->qd_sync_gen = sync_gen;
 991			spin_unlock(&qd->qd_lockref.lock);
 992		}
 993	}
 994	return error;
 995}
 996
 997static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 998{
 999	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1000	struct gfs2_quota q;
1001	struct gfs2_quota_lvb *qlvb;
1002	loff_t pos;
1003	int error;
1004
1005	memset(&q, 0, sizeof(struct gfs2_quota));
1006	pos = qd2offset(qd);
1007	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
1008	if (error < 0)
1009		return error;
1010
1011	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1012	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
1013	qlvb->__pad = 0;
1014	qlvb->qb_limit = q.qu_limit;
1015	qlvb->qb_warn = q.qu_warn;
1016	qlvb->qb_value = q.qu_value;
1017	spin_lock(&qd->qd_lockref.lock);
1018	qd->qd_qb = *qlvb;
1019	spin_unlock(&qd->qd_lockref.lock);
1020
1021	return 0;
1022}
1023
1024static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
1025		    struct gfs2_holder *q_gh)
1026{
1027	struct gfs2_sbd *sdp = qd->qd_sbd;
1028	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1029	struct gfs2_holder i_gh;
1030	int error;
1031
1032	gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
1033restart:
1034	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
1035	if (error)
1036		return error;
1037
1038	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
1039		force_refresh = FORCE;
1040
1041	spin_lock(&qd->qd_lockref.lock);
1042	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1043	spin_unlock(&qd->qd_lockref.lock);
1044
1045	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
1046		gfs2_glock_dq_uninit(q_gh);
1047		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
1048					   GL_NOCACHE, q_gh);
1049		if (error)
1050			return error;
1051
1052		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1053		if (error)
1054			goto fail;
1055
1056		error = update_qd(sdp, qd);
1057		if (error)
1058			goto fail_gunlock;
1059
1060		gfs2_glock_dq_uninit(&i_gh);
1061		gfs2_glock_dq_uninit(q_gh);
1062		force_refresh = 0;
1063		goto restart;
1064	}
1065
1066	return 0;
1067
1068fail_gunlock:
1069	gfs2_glock_dq_uninit(&i_gh);
1070fail:
1071	gfs2_glock_dq_uninit(q_gh);
1072	return error;
1073}
1074
1075int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
1076{
1077	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
1078	struct gfs2_quota_data *qd;
1079	u32 x;
1080	int error;
 
 
1081
1082	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 
1083		return 0;
1084
1085	error = gfs2_quota_hold(ip, uid, gid);
1086	if (error)
1087		return error;
1088
1089	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
1090	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);
1091
1092	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1093		qd = ip->i_qadata->qa_qd[x];
1094		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
 
 
 
1095		if (error)
1096			break;
1097	}
1098
1099	if (!error)
1100		set_bit(GIF_QD_LOCKED, &ip->i_flags);
1101	else {
1102		while (x--)
1103			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1104		gfs2_quota_unhold(ip);
1105	}
1106
1107	return error;
1108}
1109
1110static bool need_sync(struct gfs2_quota_data *qd)
1111{
1112	struct gfs2_sbd *sdp = qd->qd_sbd;
1113	struct gfs2_tune *gt = &sdp->sd_tune;
1114	s64 value, change, limit;
1115	unsigned int num, den;
1116	int ret = false;
1117
1118	spin_lock(&qd->qd_lockref.lock);
1119	if (!qd->qd_qb.qb_limit)
1120		goto out;
1121
1122	change = qd->qd_change;
1123	if (change <= 0)
1124		goto out;
1125	value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1126	limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1127	if (value >= limit)
1128		goto out;
1129
1130	spin_lock(&gt->gt_spin);
1131	num = gt->gt_quota_scale_num;
1132	den = gt->gt_quota_scale_den;
1133	spin_unlock(&gt->gt_spin);
1134
1135	change *= gfs2_jindex_size(sdp) * num;
1136	change = div_s64(change, den);
1137	if (value + change < limit)
1138		goto out;
1139
1140	ret = true;
1141out:
1142	spin_unlock(&qd->qd_lockref.lock);
1143	return ret;
1144}
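/*
 * Illustrative need_sync() arithmetic: with a limit of 1000 blocks, a cached
 * value of 900, a local change of +30, four journals and a quota_scale of 1/1,
 * the scaled change is 30 * 4 = 120 and 900 + 120 >= 1000, so the change is
 * pushed to the quota file; with a cached value of 800 it would stay local.
 */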
1145
1146void gfs2_quota_unlock(struct gfs2_inode *ip)
1147{
1148	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1149	struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
1150	unsigned int count = 0;
1151	u32 x;
1152
1153	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
1154		return;
1155
1156	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1157		struct gfs2_quota_data *qd;
1158		bool sync;
1159		int error;
1160
1161		qd = ip->i_qadata->qa_qd[x];
1162		sync = need_sync(qd);
1163
1164		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
1165		if (!sync)
1166			continue;
1167
1168		spin_lock(&qd_lock);
1169		sync = qd_grab_sync(sdp, qd, U64_MAX);
1170		spin_unlock(&qd_lock);
1171
1172		if (!sync)
1173			continue;
1174
1175		gfs2_assert_warn(sdp, qd->qd_change_sync);
1176		error = bh_get(qd);
1177		if (error) {
1178			qd_ungrab_sync(qd);
1179			continue;
1180		}
1181
1182		qda[count++] = qd;
 
1183	}
1184
1185	if (count) {
1186		u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen);
1187
1188		do_sync(count, qda, sync_gen);
1189		for (x = 0; x < count; x++)
1190			qd_unlock(qda[x]);
1191	}
1192
 
1193	gfs2_quota_unhold(ip);
1194}
1195
1196#define MAX_LINE 256
1197
1198static void print_message(struct gfs2_quota_data *qd, char *type)
1199{
1200	struct gfs2_sbd *sdp = qd->qd_sbd;
1201
1202	if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
1203		fs_info(sdp, "quota %s for %s %u\n",
1204			type,
1205			(qd->qd_id.type == USRQUOTA) ? "user" : "group",
1206			from_kqid(&init_user_ns, qd->qd_id));
1207	}
1208}
1209
1210/**
1211 * gfs2_quota_check - check if allocating new blocks will exceed quota
1212 * @ip:  The inode for which this check is being performed
1213 * @uid: The uid to check against
1214 * @gid: The gid to check against
1215 * @ap:  The allocation parameters. ap->target contains the requested
1216 *       blocks. ap->min_target, if set, contains the minimum blks
1217 *       requested.
1218 *
1219 * Returns: 0 on success.
1220 *                  min_req = ap->min_target ? ap->min_target : ap->target;
1221 *                  quota must allow at least min_req blks for success and
1222 *                  ap->allowed is set to the number of blocks allowed
1223 *
1224 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
1225 *                  of blocks available.
1226 */
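/*
 * Worked example (illustrative): with a hard limit of 1000 blocks and a
 * current value of 990, ap->allowed ends up as 10.  A request with
 * ap->target = 50 and ap->min_target = 5 still succeeds because min_target
 * fits within the 10 allowed blocks; with ap->min_target = 20 the check
 * would return -EDQUOT instead.
 */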
1227int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
1228		     struct gfs2_alloc_parms *ap)
1229{
1230	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
1231	struct gfs2_quota_data *qd;
1232	s64 value, warn, limit;
1233	u32 x;
1234	int error = 0;
1235
1236	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
1237	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
1238		return 0;
1239
1240	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1241		qd = ip->i_qadata->qa_qd[x];
 
 
 
1242
1243		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1244		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
1245			continue;
1246
1247		spin_lock(&qd->qd_lockref.lock);
1248		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
1249		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
1250		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
 
1251		value += qd->qd_change;
1252		spin_unlock(&qd->qd_lockref.lock);
1253
1254		if (limit > 0 && (limit - value) < ap->allowed)
1255			ap->allowed = limit - value;
1256		/* If we can't meet the target */
1257		if (limit && limit < (value + (s64)ap->target)) {
1258			/* If no min_target specified or we don't meet
1259			 * min_target, return -EDQUOT */
1260			if (!ap->min_target || ap->min_target > ap->allowed) {
1261				if (!test_and_set_bit(QDF_QMSG_QUIET,
1262						      &qd->qd_flags)) {
1263					print_message(qd, "exceeded");
1264					quota_send_warning(qd->qd_id,
1265							   sdp->sd_vfs->s_dev,
1266							   QUOTA_NL_BHARDWARN);
1267				}
1268				error = -EDQUOT;
1269				break;
1270			}
1271		} else if (warn && warn < value &&
1272			   time_after_eq(jiffies, qd->qd_last_warn +
1273					 gfs2_tune_get(sdp, gt_quota_warn_period)
1274					 * HZ)) {
1275			quota_send_warning(qd->qd_id,
 
1276					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1277			print_message(qd, "warning");
1278			error = 0;
1279			qd->qd_last_warn = jiffies;
1280		}
1281	}
 
1282	return error;
1283}
1284
1285void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1286		       kuid_t uid, kgid_t gid)
1287{
 
1288	struct gfs2_quota_data *qd;
1289	u32 x;
1290	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1291
1292	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
1293	    gfs2_assert_warn(sdp, change))
1294		return;
1295	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1296		return;
1297
1298	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
1299				 ip->i_qadata->qa_ref > 0))
1300		return;
1301	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
1302		qd = ip->i_qadata->qa_qd[x];
1303
1304		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
1305		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
1306			do_qc(qd, change);
1307		}
1308	}
1309}
1310
1311int gfs2_quota_sync(struct super_block *sb, int type)
1312{
1313	struct gfs2_sbd *sdp = sb->s_fs_info;
1314	struct gfs2_quota_data **qda;
1315	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
1316	u64 sync_gen;
 
1317	int error = 0;
1318
1319	if (sb_rdonly(sdp->sd_vfs))
1320		return 0;
1321
1322	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1323	if (!qda)
1324		return -ENOMEM;
1325
1326	mutex_lock(&sdp->sd_quota_sync_mutex);
1327	sync_gen = sdp->sd_quota_sync_gen + 1;
1328
1329	do {
1330		struct gfs2_quota_data *iter;
1331		unsigned int num_qd = 0;
1332		unsigned int x;
1333
1334		spin_lock(&qd_lock);
1335		list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
1336			if (qd_grab_sync(sdp, iter, sync_gen)) {
1337				qda[num_qd++] = iter;
1338				if (num_qd == max_qd)
1339					break;
1340			}
1341		}
1342		spin_unlock(&qd_lock);
1343
1344		if (!num_qd)
1345			break;
1346
1347		for (x = 0; x < num_qd; x++) {
1348			error = bh_get(qda[x]);
1349			if (!error)
1350				continue;
1351
1352			while (x < num_qd)
1353				qd_ungrab_sync(qda[--num_qd]);
1354			break;
1355		}
1356
1357		if (!error) {
1358			WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen);
1359			error = do_sync(num_qd, qda, sync_gen);
1360		}
 
1361
1362		for (x = 0; x < num_qd; x++)
1363			qd_unlock(qda[x]);
1364	} while (!error);
1365
1366	mutex_unlock(&sdp->sd_quota_sync_mutex);
1367	kfree(qda);
1368
1369	return error;
1370}
1371
1372int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
1373{
1374	struct gfs2_quota_data *qd;
1375	struct gfs2_holder q_gh;
1376	int error;
1377
1378	error = qd_get(sdp, qid, &qd);
1379	if (error)
1380		return error;
1381
1382	error = do_glock(qd, FORCE, &q_gh);
1383	if (!error)
1384		gfs2_glock_dq_uninit(&q_gh);
1385
1386	qd_put(qd);
1387	return error;
1388}
1389
1390int gfs2_quota_init(struct gfs2_sbd *sdp)
1391{
1392	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1393	u64 size = i_size_read(sdp->sd_qc_inode);
1394	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1395	unsigned int x, slot = 0;
1396	unsigned int found = 0;
1397	unsigned int hash;
1398	unsigned int bm_size;
1399	struct buffer_head *bh;
1400	u64 dblock;
1401	u32 extlen = 0;
1402	int error;
1403
1404	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1405		return -EIO;
1406
1407	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1408	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1409	bm_size *= sizeof(unsigned long);
1410	error = -ENOMEM;
1411	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
1412	if (sdp->sd_quota_bitmap == NULL)
1413		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
1414						 __GFP_ZERO);
1415	if (!sdp->sd_quota_bitmap)
1416		return error;
1417
1418	for (x = 0; x < blocks; x++) {
1419		struct gfs2_quota_change *qc;
1420		unsigned int y;
1421
1422		if (!extlen) {
1423			extlen = 32;
1424			error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
1425			if (error)
1426				goto fail;
1427		}
1428		error = -EIO;
1429		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1430		if (!bh)
1431			goto fail;
1432		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
1433			goto fail_brelse;
 
 
1434
1435		qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
1436		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1437		     y++, slot++) {
1438			struct gfs2_quota_data *old_qd, *qd;
1439			s64 qc_change = be64_to_cpu(qc->qc_change);
1440			u32 qc_flags = be32_to_cpu(qc->qc_flags);
1441			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
1442						USRQUOTA : GRPQUOTA;
1443			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
1444						      be32_to_cpu(qc->qc_id));
1445			qc++;
1446			if (!qc_change)
1447				continue;
1448
1449			hash = gfs2_qd_hash(sdp, qc_id);
1450			qd = qd_alloc(hash, sdp, qc_id);
1451			if (qd == NULL)
1452				goto fail_brelse;
 
 
1453
1454			set_bit(QDF_CHANGE, &qd->qd_flags);
1455			qd->qd_change = qc_change;
1456			qd->qd_slot = slot;
1457			qd->qd_slot_ref = 1;
1458
1459			spin_lock(&qd_lock);
1460			spin_lock_bucket(hash);
1461			old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id);
1462			if (old_qd) {
 1463				fs_err(sdp, "Corruption found in quota_change%u "
1464					    "file: duplicate identifier in "
1465					    "slot %u\n",
1466					    sdp->sd_jdesc->jd_jid, slot);
1467
1468				spin_unlock_bucket(hash);
1469				spin_unlock(&qd_lock);
1470				qd_put(old_qd);
1471
1472				gfs2_glock_put(qd->qd_gl);
1473				kmem_cache_free(gfs2_quotad_cachep, qd);
1474
1475				/* zero out the duplicate slot */
1476				lock_buffer(bh);
1477				memset(qc, 0, sizeof(*qc));
1478				mark_buffer_dirty(bh);
1479				unlock_buffer(bh);
1480
1481				continue;
1482			}
1483			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1484			list_add(&qd->qd_list, &sdp->sd_quota_list);
1485			atomic_inc(&sdp->sd_quota_count);
1486			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
1487			spin_unlock_bucket(hash);
1488			spin_unlock(&qd_lock);
1489
1490			found++;
1491		}
1492
1493		if (buffer_dirty(bh))
1494			sync_dirty_buffer(bh);
1495		brelse(bh);
1496		dblock++;
1497		extlen--;
1498	}
1499
1500	if (found)
1501		fs_info(sdp, "found %u quota changes\n", found);
1502
1503	return 0;
1504
1505fail_brelse:
1506	if (buffer_dirty(bh))
1507		sync_dirty_buffer(bh);
1508	brelse(bh);
1509fail:
1510	gfs2_quota_cleanup(sdp);
1511	return error;
1512}
1513
1514void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1515{
 
1516	struct gfs2_quota_data *qd;
1517	LIST_HEAD(dispose);
1518	int count;
1519
1520	BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
1521		test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
1522
1523	spin_lock(&qd_lock);
1524	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
1525		spin_lock(&qd->qd_lockref.lock);
1526		if (qd->qd_lockref.count != 0) {
1527			spin_unlock(&qd->qd_lockref.lock);
1528			continue;
1529		}
1530		lockref_mark_dead(&qd->qd_lockref);
1531		spin_unlock(&qd->qd_lockref.lock);
1532
1533		list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
1534		list_add(&qd->qd_lru, &dispose);
1535	}
1536	spin_unlock(&qd_lock);
 
 
1537
1538	gfs2_qd_list_dispose(&dispose);
 
1539
1540	wait_event_timeout(sdp->sd_kill_wait,
1541		(count = atomic_read(&sdp->sd_quota_count)) == 0,
1542		HZ * 60);
1543
1544	if (count != 0)
1545		fs_err(sdp, "%d left-over quota data objects\n", count);
1546
1547	kvfree(sdp->sd_quota_bitmap);
1548	sdp->sd_quota_bitmap = NULL;
 
 
 
1549}
1550
1551static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1552{
1553	if (error == 0 || error == -EROFS)
1554		return;
1555	if (!gfs2_withdrawing_or_withdrawn(sdp)) {
1556		if (!cmpxchg(&sdp->sd_log_error, 0, error))
1557			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1558		wake_up(&sdp->sd_logd_waitq);
1559	}
1560}
1561
1562static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1563			       int (*fxn)(struct super_block *sb, int type),
1564			       unsigned long t, unsigned long *timeo,
1565			       unsigned int *new_timeo)
1566{
1567	if (t >= *timeo) {
1568		int error = fxn(sdp->sd_vfs, 0);
1569		quotad_error(sdp, msg, error);
1570		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1571	} else {
1572		*timeo -= t;
1573	}
1574}
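/*
 * Illustrative timing: with gt_quota_quantum set to 60 seconds, a wakeup
 * after t = 25 * HZ jiffies merely shrinks the remaining timeout; once the
 * accumulated wakeups reach the full 60 * HZ, gfs2_quota_sync() runs and the
 * timeout is re-armed to the whole quantum again.
 */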
1575
 1576void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
1577	if (!sdp->sd_statfs_force_sync) {
1578		sdp->sd_statfs_force_sync = 1;
1579		wake_up(&sdp->sd_quota_wait);
1580	}
1581}
1582
1583
1584/**
1585 * gfs2_quotad - Write cached quota changes into the quota file
1586 * @data: Pointer to GFS2 superblock
1587 *
1588 */
1589
1590int gfs2_quotad(void *data)
1591{
1592	struct gfs2_sbd *sdp = data;
1593	struct gfs2_tune *tune = &sdp->sd_tune;
1594	unsigned long statfs_timeo = 0;
1595	unsigned long quotad_timeo = 0;
1596	unsigned long t = 0;
 
 
1597
1598	set_freezable();
1599	while (!kthread_should_stop()) {
1600		if (gfs2_withdrawing_or_withdrawn(sdp))
1601			break;
1602
1603		/* Update the master statfs file */
1604		if (sdp->sd_statfs_force_sync) {
1605			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1606			quotad_error(sdp, "statfs", error);
1607			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1608		}
1609		else
1610			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1611				   	   &statfs_timeo,
1612					   &tune->gt_statfs_quantum);
1613
1614		/* Update quota file */
1615		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1616				   &quotad_timeo, &tune->gt_quota_quantum);
1617
1618		t = min(quotad_timeo, statfs_timeo);
1619
1620		t = wait_event_freezable_timeout(sdp->sd_quota_wait,
1621				sdp->sd_statfs_force_sync ||
1622				gfs2_withdrawing_or_withdrawn(sdp) ||
1623				kthread_should_stop(),
1624				t);
1625
1626		if (sdp->sd_statfs_force_sync)
1627			t = 0;
 
1628	}
1629
1630	return 0;
1631}
1632
1633static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
 
1634{
1635	struct gfs2_sbd *sdp = sb->s_fs_info;
1636
1637	memset(state, 0, sizeof(*state));
 
1638
1639	switch (sdp->sd_args.ar_quota) {
1640	case GFS2_QUOTA_QUIET:
1641		fallthrough;
1642	case GFS2_QUOTA_ON:
1643		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1644		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1645		fallthrough;
1646	case GFS2_QUOTA_ACCOUNT:
1647		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1648						  QCI_SYSFILE;
1649		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1650						  QCI_SYSFILE;
1651		break;
1652	case GFS2_QUOTA_OFF:
1653		break;
1654	}
 
1655	if (sdp->sd_quota_inode) {
1656		state->s_state[USRQUOTA].ino =
1657					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1658		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1659	}
1660	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
1661	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1662	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1663	return 0;
1664}
1665
1666static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1667			  struct qc_dqblk *fdq)
1668{
1669	struct gfs2_sbd *sdp = sb->s_fs_info;
1670	struct gfs2_quota_lvb *qlvb;
1671	struct gfs2_quota_data *qd;
1672	struct gfs2_holder q_gh;
1673	int error;
1674
1675	memset(fdq, 0, sizeof(*fdq));
1676
1677	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1678		return -ESRCH; /* Crazy XFS error code */
1679
1680	if ((qid.type != USRQUOTA) &&
1681	    (qid.type != GRPQUOTA))
1682		return -EINVAL;
1683
1684	error = qd_get(sdp, qid, &qd);
1685	if (error)
1686		return error;
1687	error = do_glock(qd, FORCE, &q_gh);
1688	if (error)
1689		goto out;
1690
1691	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1692	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1693	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1694	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
 
 
1695
1696	gfs2_glock_dq_uninit(&q_gh);
1697out:
1698	qd_put(qd);
1699	return error;
1700}
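/*
 * Illustrative conversion: with 4 KiB filesystem blocks (sb_bsize_shift == 12),
 * a qb_limit of 25600 blocks is reported back through quotactl as
 * 25600 << 12 = 104857600 bytes (100 MiB).
 */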
1701
1702/* GFS2 only supports a subset of the XFS fields */
1703#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
1704
1705static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1706			  struct qc_dqblk *fdq)
1707{
1708	struct gfs2_sbd *sdp = sb->s_fs_info;
1709	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1710	struct gfs2_quota_data *qd;
1711	struct gfs2_holder q_gh, i_gh;
1712	unsigned int data_blocks, ind_blocks;
1713	unsigned int blocks = 0;
1714	int alloc_required;
 
1715	loff_t offset;
1716	int error;
1717
1718	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1719		return -ESRCH; /* Crazy XFS error code */
1720
1721	if ((qid.type != USRQUOTA) &&
1722	    (qid.type != GRPQUOTA))
1723		return -EINVAL;
 
1724
1725	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1726		return -EINVAL;
 
 
1727
1728	error = qd_get(sdp, qid, &qd);
1729	if (error)
1730		return error;
1731
1732	error = gfs2_qa_get(ip);
 
1733	if (error)
1734		goto out_put;
1735
1736	inode_lock(&ip->i_inode);
1737	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1738	if (error)
1739		goto out_unlockput;
1740	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1741	if (error)
1742		goto out_q;
1743
1744	/* Check for existing entry, if none then alloc new blocks */
1745	error = update_qd(sdp, qd);
1746	if (error)
1747		goto out_i;
1748
1749	/* If nothing has changed, this is a no-op */
1750	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1751	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1752		fdq->d_fieldmask ^= QC_SPC_SOFT;
1753
1754	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1755	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1756		fdq->d_fieldmask ^= QC_SPC_HARD;
1757
1758	if ((fdq->d_fieldmask & QC_SPACE) &&
1759	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1760		fdq->d_fieldmask ^= QC_SPACE;
1761
1762	if (fdq->d_fieldmask == 0)
1763		goto out_i;
1764
1765	offset = qd2offset(qd);
1766	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1767	if (gfs2_is_stuffed(ip))
1768		alloc_required = 1;
1769	if (alloc_required) {
1770		struct gfs2_alloc_parms ap = {};
 
 
1771		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1772				       &data_blocks, &ind_blocks);
1773		blocks = 1 + data_blocks + ind_blocks;
1774		ap.target = blocks;
1775		error = gfs2_inplace_reserve(ip, &ap);
1776		if (error)
1777			goto out_i;
1778		blocks += gfs2_rg_blocks(ip, blocks);
1779	}
1780
1781	/* Some quotas span block boundaries and can update two blocks,
1782	   adding an extra block to the transaction to handle such quotas */
1783	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1784	if (error)
1785		goto out_release;
1786
1787	/* Apply changes */
1788	error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
1789	if (!error)
1790		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1791
1792	gfs2_trans_end(sdp);
1793out_release:
1794	if (alloc_required)
1795		gfs2_inplace_release(ip);
 
 
 
1796out_i:
1797	gfs2_glock_dq_uninit(&i_gh);
1798out_q:
1799	gfs2_glock_dq_uninit(&q_gh);
1800out_unlockput:
1801	gfs2_qa_put(ip);
1802	inode_unlock(&ip->i_inode);
1803out_put:
 
1804	qd_put(qd);
1805	return error;
1806}
1807
1808const struct quotactl_ops gfs2_quotactl_ops = {
1809	.quota_sync     = gfs2_quota_sync,
1810	.get_state	= gfs2_quota_get_state,
1811	.get_dqblk	= gfs2_get_dqblk,
1812	.set_dqblk	= gfs2_set_dqblk,
1813};
1814
1815void __init gfs2_quota_hash_init(void)
1816{
1817	unsigned i;
1818
1819	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
1820		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1821}