   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Implementation of the diskquota system for the LINUX operating system. QUOTA
   4 * is implemented using the BSD system call interface as the means of
   5 * communication with the user level. This file contains the generic routines
   6 * called by the different filesystems on allocation of an inode or block.
   7 * These routines take care of the administration needed to have a consistent
   8 * diskquota tracking system. The ideas of both user and group quotas are based
   9 * on the Melbourne quota system as used on BSD derived systems. The internal
  10 * implementation is based on one of the several variants of the LINUX
  11 * inode-subsystem with added complexity of the diskquota system.
  12 *
  13 * Author:	Marco van Wieringen <mvw@planets.elm.net>
  14 *
  15 * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
  16 *
  17 *		Revised list management to avoid races
  18 *		-- Bill Hawes, <whawes@star.net>, 9/98
  19 *
  20 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
  21 *		As the consequence the locking was moved from dquot_decr_...(),
  22 *		dquot_incr_...() to calling functions.
  23 *		invalidate_dquots() now writes modified dquots.
  24 *		Serialized quota_off() and quota_on() for mount point.
  25 *		Fixed a few bugs in grow_dquots().
  26 *		Fixed deadlock in write_dquot() - we no longer account quotas on
  27 *		quota files
  28 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
  29 *		add_dquot_ref() restarts after blocking
  30 *		Added check for bogus uid and fixed check for group in quotactl.
  31 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
  32 *
  33 *		Used struct list_head instead of own list struct
  34 *		Invalidation of referenced dquots is no longer possible
  35 *		Improved free_dquots list management
  36 *		Quota and i_blocks are now updated in one place to avoid races
  37 *		Warnings are now delayed so we won't block in critical section
  38 *		Write updated not to require dquot lock
  39 *		Jan Kara, <jack@suse.cz>, 9/2000
  40 *
  41 *		Added dynamic quota structure allocation
  42 *		Jan Kara <jack@suse.cz> 12/2000
  43 *
  44 *		Rewritten quota interface. Implemented new quota format and
  45 *		formats registering.
  46 *		Jan Kara, <jack@suse.cz>, 2001,2002
  47 *
  48 *		New SMP locking.
  49 *		Jan Kara, <jack@suse.cz>, 10/2002
  50 *
  51 *		Added journalled quota support, fix lock inversion problems
  52 *		Jan Kara, <jack@suse.cz>, 2003,2004
  53 *
  54 * (C) Copyright 1994 - 1997 Marco van Wieringen
  55 */
  56
  57#include <linux/errno.h>
  58#include <linux/kernel.h>
  59#include <linux/fs.h>
  60#include <linux/mount.h>
  61#include <linux/mm.h>
  62#include <linux/time.h>
  63#include <linux/types.h>
  64#include <linux/string.h>
  65#include <linux/fcntl.h>
  66#include <linux/stat.h>
  67#include <linux/tty.h>
  68#include <linux/file.h>
  69#include <linux/slab.h>
  70#include <linux/sysctl.h>
  71#include <linux/init.h>
  72#include <linux/module.h>
  73#include <linux/proc_fs.h>
  74#include <linux/security.h>
  75#include <linux/sched.h>
  76#include <linux/cred.h>
  77#include <linux/kmod.h>
  78#include <linux/namei.h>
  79#include <linux/capability.h>
  80#include <linux/quotaops.h>
  81#include <linux/blkdev.h>
  82#include "../internal.h" /* ugh */
  83
  84#include <linux/uaccess.h>
  85
  86/*
  87 * There are five quota SMP locks:
  88 * * dq_list_lock protects all lists with quotas and quota formats.
  89 * * dquot->dq_dqb_lock protects data from dq_dqb
  90 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
  91 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
  92 *   dquot_transfer() can stabilize amount it transfers
  93 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
  94 *   pointers in the inode
  95 * * dq_state_lock protects modifications of quota state (on quotaon and
  96 *   quotaoff) and readers who care about latest values take it as well.
  97 *
  98 * The spinlock ordering is hence:
  99 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 100 *   dq_list_lock > dq_state_lock
 101 *
  102 * Note that some things (e.g. the sb pointer, type and id) don't change
  103 * during the life of the dquot structure and so needn't be protected by a lock
 104 *
  105 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
  106 * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
 107 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 108 * inode and before dropping dquot references to avoid use of dquots after
 109 * they are freed. dq_data_lock is used to serialize the pointer setting and
 110 * clearing operations.
 111 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 112 * inode is a quota file). Functions adding pointers from inode to dquots have
 113 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 114 * have to do all pointer modifications before dropping dq_data_lock. This makes
 115 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 116 * then drops all pointers to dquots from an inode.
 117 *
 118 * Each dquot has its dq_lock mutex.  Dquot is locked when it is being read to
 119 * memory (or space for it is being allocated) on the first dqget(), when it is
 120 * being written out, and when it is being released on the last dqput(). The
 121 * allocation and release operations are serialized by the dq_lock and by
 122 * checking the use count in dquot_release().
 123 *
 124 * Lock ordering (including related VFS locks) is the following:
 125 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 126 */
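/*
 * Illustrative sketch (not part of the original comment): the canonical
 * pattern for accessing dquots via inode pointers under dquot_srcu, as
 * used by __dquot_alloc_space() and friends below:
 *
 *	index = srcu_read_lock(&dquot_srcu);
 *	dquots = i_dquot(inode);
 *	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 *		if (dquots[cnt])
 *			...use dquots[cnt], e.g. under its dq_dqb_lock...
 *	srcu_read_unlock(&dquot_srcu, index);
 */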
 127
 128static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
 129static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 130__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 131EXPORT_SYMBOL(dq_data_lock);
 132DEFINE_STATIC_SRCU(dquot_srcu);
 133
 134static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
 135
 136void __quota_error(struct super_block *sb, const char *func,
 137		   const char *fmt, ...)
 138{
 139	if (printk_ratelimit()) {
 140		va_list args;
 141		struct va_format vaf;
 142
 143		va_start(args, fmt);
 144
 145		vaf.fmt = fmt;
 146		vaf.va = &args;
 147
 148		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
 149		       sb->s_id, func, &vaf);
 150
 151		va_end(args);
 152	}
 153}
 154EXPORT_SYMBOL(__quota_error);
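/*
 * Callers normally use the quota_error() wrapper from <linux/quotaops.h>,
 * which supplies __func__ for them. An illustrative call:
 *
 *	quota_error(sb, "Can't read quota structure for id %u", id);
 *
 * which comes out rate-limited and prefixed with the device name as above.
 */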
 155
 156#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
 157static char *quotatypes[] = INITQFNAMES;
 158#endif
 159static struct quota_format_type *quota_formats;	/* List of registered formats */
 160static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 161
 162/* SLAB cache for dquot structures */
 163static struct kmem_cache *dquot_cachep;
 164
 165int register_quota_format(struct quota_format_type *fmt)
 166{
 167	spin_lock(&dq_list_lock);
 168	fmt->qf_next = quota_formats;
 169	quota_formats = fmt;
 170	spin_unlock(&dq_list_lock);
 171	return 0;
 172}
 173EXPORT_SYMBOL(register_quota_format);
 174
 175void unregister_quota_format(struct quota_format_type *fmt)
 176{
 177	struct quota_format_type **actqf;
 178
 179	spin_lock(&dq_list_lock);
 180	for (actqf = &quota_formats; *actqf && *actqf != fmt;
 181	     actqf = &(*actqf)->qf_next)
 182		;
 183	if (*actqf)
 184		*actqf = (*actqf)->qf_next;
 185	spin_unlock(&dq_list_lock);
 186}
 187EXPORT_SYMBOL(unregister_quota_format);
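/*
 * A minimal sketch of how a quota format driver uses this pair of calls
 * (my_fmt and my_format_ops are hypothetical names; the v2 format driver
 * does essentially this):
 *
 *	static struct quota_format_type my_fmt = {
 *		.qf_fmt_id = QFMT_VFS_V1,
 *		.qf_ops    = &my_format_ops,
 *		.qf_owner  = THIS_MODULE,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_quota_format(&my_fmt);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_quota_format(&my_fmt);
 *	}
 */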
 188
 189static struct quota_format_type *find_quota_format(int id)
 190{
 191	struct quota_format_type *actqf;
 192
 193	spin_lock(&dq_list_lock);
 194	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
 195	     actqf = actqf->qf_next)
 196		;
 197	if (!actqf || !try_module_get(actqf->qf_owner)) {
 198		int qm;
 199
 200		spin_unlock(&dq_list_lock);
 201
 202		for (qm = 0; module_names[qm].qm_fmt_id &&
 203			     module_names[qm].qm_fmt_id != id; qm++)
 204			;
 205		if (!module_names[qm].qm_fmt_id ||
 206		    request_module(module_names[qm].qm_mod_name))
 207			return NULL;
 208
 209		spin_lock(&dq_list_lock);
 210		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
 211		     actqf = actqf->qf_next)
 212			;
 213		if (actqf && !try_module_get(actqf->qf_owner))
 214			actqf = NULL;
 215	}
 216	spin_unlock(&dq_list_lock);
 217	return actqf;
 218}
 219
 220static void put_quota_format(struct quota_format_type *fmt)
 221{
 222	module_put(fmt->qf_owner);
 223}
 224
 225/*
 226 * Dquot List Management:
 227 * The quota code uses four lists for dquot management: the inuse_list,
 228 * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
 229 * structure may be on some of those lists, depending on its current state.
 230 *
  231 * All dquots are placed at the end of inuse_list when first created, and this
 232 * list is used for invalidate operation, which must look at every dquot.
 233 *
 234 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 235 * and this list is searched whenever we need an available dquot.  Dquots are
 236 * removed from the list as soon as they are used again, and
 237 * dqstats.free_dquots gives the number of dquots on the list. When
 238 * dquot is invalidated it's completely released from memory.
 239 *
  240 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
  241 * dirty, and this list is searched when writing dirty dquots back to the
 242 * quota file. Note that some filesystems do dirty dquot tracking on their
 243 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 244 *
 245 * Dquots with a specific identity (device, type and id) are placed on
  246 * one of the dquot_hash[] hash chains. This provides an efficient search
 247 * mechanism to locate a specific dquot.
 248 */
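/*
 * Illustrative lifecycle (a sketch, not part of the original comment):
 *
 *	dquot = dqget(sb, qid);		   on inuse_list + a hash chain
 *	dquot_mark_dquot_dirty(dquot);	   also on dqi_dirty_list
 *	dqput(dquot);			   last ref: tail of free_dquots,
 *					   where the shrinker or
 *					   invalidate_dquots() destroys it
 */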
 249
 250static LIST_HEAD(inuse_list);
 251static LIST_HEAD(free_dquots);
 252static unsigned int dq_hash_bits, dq_hash_mask;
 253static struct hlist_head *dquot_hash;
 254
 255struct dqstats dqstats;
 256EXPORT_SYMBOL(dqstats);
 257
 258static qsize_t inode_get_rsv_space(struct inode *inode);
 259static qsize_t __inode_get_rsv_space(struct inode *inode);
 260static int __dquot_initialize(struct inode *inode, int type);
 261
 262static inline unsigned int
 263hashfn(const struct super_block *sb, struct kqid qid)
 264{
 265	unsigned int id = from_kqid(&init_user_ns, qid);
 266	int type = qid.type;
 267	unsigned long tmp;
 268
 269	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
 270	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
 271}
 272
 273/*
 274 * Following list functions expect dq_list_lock to be held
 275 */
 276static inline void insert_dquot_hash(struct dquot *dquot)
 277{
 278	struct hlist_head *head;
 279	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
 280	hlist_add_head(&dquot->dq_hash, head);
 281}
 282
 283static inline void remove_dquot_hash(struct dquot *dquot)
 284{
 285	hlist_del_init(&dquot->dq_hash);
 286}
 287
 288static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
 289				struct kqid qid)
 290{
 291	struct hlist_node *node;
 292	struct dquot *dquot;
 293
 294	hlist_for_each (node, dquot_hash+hashent) {
 295		dquot = hlist_entry(node, struct dquot, dq_hash);
 296		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
 297			return dquot;
 298	}
 299	return NULL;
 300}
 301
 302/* Add a dquot to the tail of the free list */
 303static inline void put_dquot_last(struct dquot *dquot)
 304{
 305	list_add_tail(&dquot->dq_free, &free_dquots);
 306	dqstats_inc(DQST_FREE_DQUOTS);
 307}
 308
 309static inline void remove_free_dquot(struct dquot *dquot)
 310{
 311	if (list_empty(&dquot->dq_free))
 312		return;
 313	list_del_init(&dquot->dq_free);
 314	dqstats_dec(DQST_FREE_DQUOTS);
 315}
 316
 317static inline void put_inuse(struct dquot *dquot)
 318{
  319	/* We add to the back of the inuse list so that we don't have to
  320	 * restart when we block while traversing this list */
 321	list_add_tail(&dquot->dq_inuse, &inuse_list);
 322	dqstats_inc(DQST_ALLOC_DQUOTS);
 323}
 324
 325static inline void remove_inuse(struct dquot *dquot)
 326{
 327	dqstats_dec(DQST_ALLOC_DQUOTS);
 328	list_del(&dquot->dq_inuse);
 329}
 330/*
 331 * End of list functions needing dq_list_lock
 332 */
 333
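/*
 * Wait until a dquot that is currently being read in, written out or
 * released has been dealt with: taking and immediately dropping dq_lock
 * is enough to serialize against whoever holds it.
 */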
 334static void wait_on_dquot(struct dquot *dquot)
 335{
 336	mutex_lock(&dquot->dq_lock);
 337	mutex_unlock(&dquot->dq_lock);
 338}
 339
 340static inline int dquot_dirty(struct dquot *dquot)
 341{
 342	return test_bit(DQ_MOD_B, &dquot->dq_flags);
 343}
 344
 345static inline int mark_dquot_dirty(struct dquot *dquot)
 346{
 347	return dquot->dq_sb->dq_op->mark_dirty(dquot);
 348}
 349
  350/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
 351int dquot_mark_dquot_dirty(struct dquot *dquot)
 352{
 353	int ret = 1;
 354
 355	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 356		return 0;
 357
 358	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
 359		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
 360
 361	/* If quota is dirty already, we don't have to acquire dq_list_lock */
 362	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
 363		return 1;
 364
 365	spin_lock(&dq_list_lock);
 366	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
 367		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
 368				info[dquot->dq_id.type].dqi_dirty_list);
 369		ret = 0;
 370	}
 371	spin_unlock(&dq_list_lock);
 372	return ret;
 373}
 374EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 375
 376/* Dirtify all the dquots - this can block when journalling */
 377static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
 378{
 379	int ret, err, cnt;
 380
 381	ret = err = 0;
 382	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 383		if (dquot[cnt])
 384			/* Even in case of error we have to continue */
 385			ret = mark_dquot_dirty(dquot[cnt]);
 386		if (!err)
 387			err = ret;
 388	}
 389	return err;
 390}
 391
 392static inline void dqput_all(struct dquot **dquot)
 393{
 394	unsigned int cnt;
 395
 396	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 397		dqput(dquot[cnt]);
 398}
 399
 400static inline int clear_dquot_dirty(struct dquot *dquot)
 401{
 402	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
 403		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
 404
 405	spin_lock(&dq_list_lock);
 406	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
 407		spin_unlock(&dq_list_lock);
 408		return 0;
 409	}
 410	list_del_init(&dquot->dq_dirty);
 411	spin_unlock(&dq_list_lock);
 412	return 1;
 413}
 414
 415void mark_info_dirty(struct super_block *sb, int type)
 416{
 417	spin_lock(&dq_data_lock);
 418	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
 419	spin_unlock(&dq_data_lock);
 420}
 421EXPORT_SYMBOL(mark_info_dirty);
 422
 423/*
 424 *	Read dquot from disk and alloc space for it
 425 */
 426
 427int dquot_acquire(struct dquot *dquot)
 428{
 429	int ret = 0, ret2 = 0;
 430	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 431
 432	mutex_lock(&dquot->dq_lock);
 433	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
 434		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
 435		if (ret < 0)
 436			goto out_iolock;
 437	}
 438	/* Make sure flags update is visible after dquot has been filled */
 439	smp_mb__before_atomic();
 440	set_bit(DQ_READ_B, &dquot->dq_flags);
 441	/* Instantiate dquot if needed */
 442	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
 443		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
 444		/* Write the info if needed */
 445		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
 446			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
 447					dquot->dq_sb, dquot->dq_id.type);
 448		}
 449		if (ret < 0)
 450			goto out_iolock;
 451		if (ret2 < 0) {
 452			ret = ret2;
 453			goto out_iolock;
 454		}
 455	}
 456	/*
 457	 * Make sure flags update is visible after on-disk struct has been
 458	 * allocated. Paired with smp_rmb() in dqget().
 459	 */
 460	smp_mb__before_atomic();
 461	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 462out_iolock:
 463	mutex_unlock(&dquot->dq_lock);
 464	return ret;
 465}
 466EXPORT_SYMBOL(dquot_acquire);
 467
 468/*
 469 *	Write dquot to disk
 470 */
 471int dquot_commit(struct dquot *dquot)
 472{
 473	int ret = 0;
 474	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 475
 476	mutex_lock(&dquot->dq_lock);
 477	if (!clear_dquot_dirty(dquot))
 478		goto out_lock;
  479	/* An inactive dquot can exist only if there was an error during
  480	 * read/init => we had better not write it */
 481	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 482		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
 483	else
 484		ret = -EIO;
 485out_lock:
 486	mutex_unlock(&dquot->dq_lock);
 487	return ret;
 488}
 489EXPORT_SYMBOL(dquot_commit);
 490
 491/*
 492 *	Release dquot
 493 */
 494int dquot_release(struct dquot *dquot)
 495{
 496	int ret = 0, ret2 = 0;
 497	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 498
 499	mutex_lock(&dquot->dq_lock);
 500	/* Check whether we are not racing with some other dqget() */
 501	if (dquot_is_busy(dquot))
 502		goto out_dqlock;
 503	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
 504		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
 505		/* Write the info */
 506		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
 507			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
 508						dquot->dq_sb, dquot->dq_id.type);
 509		}
 510		if (ret >= 0)
 511			ret = ret2;
 512	}
 513	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 514out_dqlock:
 515	mutex_unlock(&dquot->dq_lock);
 516	return ret;
 517}
 518EXPORT_SYMBOL(dquot_release);
 519
 520void dquot_destroy(struct dquot *dquot)
 521{
 522	kmem_cache_free(dquot_cachep, dquot);
 523}
 524EXPORT_SYMBOL(dquot_destroy);
 525
 526static inline void do_destroy_dquot(struct dquot *dquot)
 527{
 528	dquot->dq_sb->dq_op->destroy_dquot(dquot);
 529}
 530
 531/* Invalidate all dquots on the list. Note that this function is called after
 532 * quota is disabled and pointers from inodes removed so there cannot be new
 533 * quota users. There can still be some users of quotas due to inodes being
 534 * just deleted or pruned by prune_icache() (those are not attached to any
 535 * list) or parallel quotactl call. We have to wait for such users.
 536 */
 537static void invalidate_dquots(struct super_block *sb, int type)
 538{
 539	struct dquot *dquot, *tmp;
 540
 541restart:
 542	spin_lock(&dq_list_lock);
 543	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 544		if (dquot->dq_sb != sb)
 545			continue;
 546		if (dquot->dq_id.type != type)
 547			continue;
 548		/* Wait for dquot users */
 549		if (atomic_read(&dquot->dq_count)) {
 550			dqgrab(dquot);
 551			spin_unlock(&dq_list_lock);
 552			/*
 553			 * Once dqput() wakes us up, we know it's time to free
 554			 * the dquot.
 555			 * IMPORTANT: we rely on the fact that there is always
 556			 * at most one process waiting for dquot to free.
 557			 * Otherwise dq_count would be > 1 and we would never
 558			 * wake up.
 559			 */
 560			wait_event(dquot_ref_wq,
 561				   atomic_read(&dquot->dq_count) == 1);
 562			dqput(dquot);
  563			/* At this moment the dquot need not exist (it could
  564			 * have been reclaimed by prune_dqcache()). Hence we
  565			 * must restart. */
 566			goto restart;
 567		}
 568		/*
 569		 * Quota now has no users and it has been written on last
 570		 * dqput()
 571		 */
 572		remove_dquot_hash(dquot);
 573		remove_free_dquot(dquot);
 574		remove_inuse(dquot);
 575		do_destroy_dquot(dquot);
 576	}
 577	spin_unlock(&dq_list_lock);
 578}
 579
 580/* Call callback for every active dquot on given filesystem */
 581int dquot_scan_active(struct super_block *sb,
 582		      int (*fn)(struct dquot *dquot, unsigned long priv),
 583		      unsigned long priv)
 584{
 585	struct dquot *dquot, *old_dquot = NULL;
 586	int ret = 0;
 587
 588	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
 589
 590	spin_lock(&dq_list_lock);
 591	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
 592		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 593			continue;
 594		if (dquot->dq_sb != sb)
 595			continue;
 596		/* Now we have active dquot so we can just increase use count */
 597		atomic_inc(&dquot->dq_count);
 598		spin_unlock(&dq_list_lock);
 599		dqput(old_dquot);
 600		old_dquot = dquot;
 601		/*
 602		 * ->release_dquot() can be racing with us. Our reference
 603		 * protects us from new calls to it so just wait for any
 604		 * outstanding call and recheck the DQ_ACTIVE_B after that.
 605		 */
 606		wait_on_dquot(dquot);
 607		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 608			ret = fn(dquot, priv);
 609			if (ret < 0)
 610				goto out;
 611		}
 612		spin_lock(&dq_list_lock);
 613		/* We are safe to continue now because our dquot could not
 614		 * be moved out of the inuse list while we hold the reference */
 615	}
 616	spin_unlock(&dq_list_lock);
 617out:
 618	dqput(old_dquot);
 619	return ret;
 620}
 621EXPORT_SYMBOL(dquot_scan_active);
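/*
 * Illustrative caller sketch (my_commit_dquot is a hypothetical callback):
 *
 *	static int my_commit_dquot(struct dquot *dquot, unsigned long priv)
 *	{
 *		return dquot->dq_sb->dq_op->write_dquot(dquot);
 *	}
 *	...
 *	err = dquot_scan_active(sb, my_commit_dquot, 0);
 *
 * The callback runs with a reference held on each active dquot, with
 * s_umount held by the caller as asserted above.
 */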
 622
 623/* Write all dquot structures to quota files */
 624int dquot_writeback_dquots(struct super_block *sb, int type)
 625{
 626	struct list_head dirty;
 627	struct dquot *dquot;
 628	struct quota_info *dqopt = sb_dqopt(sb);
 629	int cnt;
 630	int err, ret = 0;
 631
 632	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
 633
 634	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 635		if (type != -1 && cnt != type)
 636			continue;
 637		if (!sb_has_quota_active(sb, cnt))
 638			continue;
 639		spin_lock(&dq_list_lock);
 640		/* Move list away to avoid livelock. */
 641		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
 642		while (!list_empty(&dirty)) {
 643			dquot = list_first_entry(&dirty, struct dquot,
 644						 dq_dirty);
 645
 646			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
 647
  648			/* Now we have an active dquot to which someone holds
  649			 * a reference, so we can safely just increase the
  650			 * use count */
 651			dqgrab(dquot);
 652			spin_unlock(&dq_list_lock);
 653			err = sb->dq_op->write_dquot(dquot);
 654			if (err) {
 655				/*
 656				 * Clear dirty bit anyway to avoid infinite
 657				 * loop here.
 658				 */
 659				clear_dquot_dirty(dquot);
 660				if (!ret)
 661					ret = err;
 662			}
 663			dqput(dquot);
 664			spin_lock(&dq_list_lock);
 665		}
 666		spin_unlock(&dq_list_lock);
 667	}
 668
 669	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 670		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
 671		    && info_dirty(&dqopt->info[cnt]))
 672			sb->dq_op->write_info(sb, cnt);
 673	dqstats_inc(DQST_SYNCS);
 674
 675	return ret;
 676}
 677EXPORT_SYMBOL(dquot_writeback_dquots);
 678
 679/* Write all dquot structures to disk and make them visible from userspace */
 680int dquot_quota_sync(struct super_block *sb, int type)
 681{
 682	struct quota_info *dqopt = sb_dqopt(sb);
 683	int cnt;
 684	int ret;
 685
 686	ret = dquot_writeback_dquots(sb, type);
 687	if (ret)
 688		return ret;
 689	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
 690		return 0;
 691
  692	/* This is neither very clever nor fast, but currently I don't know of
  693	 * any other simple way of getting quota data to disk, and we must get
  694	 * it there for it to be visible to userspace... */
 695	if (sb->s_op->sync_fs)
 696		sb->s_op->sync_fs(sb, 1);
 697	sync_blockdev(sb->s_bdev);
 698
 699	/*
  700	 * Now that everything is written we can discard the pagecache so
 701	 * that userspace sees the changes.
 702	 */
 703	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 704		if (type != -1 && cnt != type)
 705			continue;
 706		if (!sb_has_quota_active(sb, cnt))
 707			continue;
 708		inode_lock(dqopt->files[cnt]);
 709		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
 710		inode_unlock(dqopt->files[cnt]);
 711	}
 712
 713	return 0;
 714}
 715EXPORT_SYMBOL(dquot_quota_sync);
 716
 717static unsigned long
 718dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 719{
 720	struct dquot *dquot;
 721	unsigned long freed = 0;
 722
 723	spin_lock(&dq_list_lock);
 724	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
 725		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
 726		remove_dquot_hash(dquot);
 727		remove_free_dquot(dquot);
 728		remove_inuse(dquot);
 729		do_destroy_dquot(dquot);
 730		sc->nr_to_scan--;
 731		freed++;
 732	}
 733	spin_unlock(&dq_list_lock);
 734	return freed;
 735}
 736
 737static unsigned long
 738dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 739{
 740	return vfs_pressure_ratio(
 741	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
 742}
 743
 744static struct shrinker dqcache_shrinker = {
 745	.count_objects = dqcache_shrink_count,
 746	.scan_objects = dqcache_shrink_scan,
 747	.seeks = DEFAULT_SEEKS,
 748};
 749
 750/*
 751 * Put reference to dquot
 752 */
 753void dqput(struct dquot *dquot)
 754{
 755	int ret;
 756
 757	if (!dquot)
 758		return;
 759#ifdef CONFIG_QUOTA_DEBUG
 760	if (!atomic_read(&dquot->dq_count)) {
 761		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
 762			    quotatypes[dquot->dq_id.type],
 763			    from_kqid(&init_user_ns, dquot->dq_id));
 764		BUG();
 765	}
 766#endif
 767	dqstats_inc(DQST_DROPS);
 768we_slept:
 769	spin_lock(&dq_list_lock);
 770	if (atomic_read(&dquot->dq_count) > 1) {
 771		/* We have more than one user... nothing to do */
 772		atomic_dec(&dquot->dq_count);
 773		/* Releasing dquot during quotaoff phase? */
 774		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
 775		    atomic_read(&dquot->dq_count) == 1)
 776			wake_up(&dquot_ref_wq);
 777		spin_unlock(&dq_list_lock);
 778		return;
 779	}
 780	/* Need to release dquot? */
 781	if (dquot_dirty(dquot)) {
 782		spin_unlock(&dq_list_lock);
 783		/* Commit dquot before releasing */
 784		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
 785		if (ret < 0) {
 786			quota_error(dquot->dq_sb, "Can't write quota structure"
 787				    " (error %d). Quota may get out of sync!",
 788				    ret);
 789			/*
 790			 * We clear dirty bit anyway, so that we avoid
 791			 * infinite loop here
 792			 */
 793			clear_dquot_dirty(dquot);
 794		}
 795		goto we_slept;
 796	}
 797	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 798		spin_unlock(&dq_list_lock);
 799		dquot->dq_sb->dq_op->release_dquot(dquot);
 800		goto we_slept;
 801	}
 802	atomic_dec(&dquot->dq_count);
 803#ifdef CONFIG_QUOTA_DEBUG
 804	/* sanity check */
 805	BUG_ON(!list_empty(&dquot->dq_free));
 806#endif
 807	put_dquot_last(dquot);
 808	spin_unlock(&dq_list_lock);
 809}
 810EXPORT_SYMBOL(dqput);
 811
 812struct dquot *dquot_alloc(struct super_block *sb, int type)
 813{
 814	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
 815}
 816EXPORT_SYMBOL(dquot_alloc);
 817
 818static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 819{
 820	struct dquot *dquot;
 821
 822	dquot = sb->dq_op->alloc_dquot(sb, type);
  823	if (!dquot)
 824		return NULL;
 825
 826	mutex_init(&dquot->dq_lock);
 827	INIT_LIST_HEAD(&dquot->dq_free);
 828	INIT_LIST_HEAD(&dquot->dq_inuse);
 829	INIT_HLIST_NODE(&dquot->dq_hash);
 830	INIT_LIST_HEAD(&dquot->dq_dirty);
 831	dquot->dq_sb = sb;
 832	dquot->dq_id = make_kqid_invalid(type);
 833	atomic_set(&dquot->dq_count, 1);
 834	spin_lock_init(&dquot->dq_dqb_lock);
 835
 836	return dquot;
 837}
 838
 839/*
 840 * Get reference to dquot
 841 *
 842 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 843 * destroying our dquot by:
 844 *   a) checking for quota flags under dq_list_lock and
 845 *   b) getting a reference to dquot before we release dq_list_lock
 846 */
 847struct dquot *dqget(struct super_block *sb, struct kqid qid)
 848{
 849	unsigned int hashent = hashfn(sb, qid);
 850	struct dquot *dquot, *empty = NULL;
 851
 852	if (!qid_has_mapping(sb->s_user_ns, qid))
 853		return ERR_PTR(-EINVAL);
 854
 855        if (!sb_has_quota_active(sb, qid.type))
 856		return ERR_PTR(-ESRCH);
 857we_slept:
 858	spin_lock(&dq_list_lock);
 859	spin_lock(&dq_state_lock);
 860	if (!sb_has_quota_active(sb, qid.type)) {
 861		spin_unlock(&dq_state_lock);
 862		spin_unlock(&dq_list_lock);
 863		dquot = ERR_PTR(-ESRCH);
 864		goto out;
 865	}
 866	spin_unlock(&dq_state_lock);
 867
 868	dquot = find_dquot(hashent, sb, qid);
 869	if (!dquot) {
 870		if (!empty) {
 871			spin_unlock(&dq_list_lock);
 872			empty = get_empty_dquot(sb, qid.type);
 873			if (!empty)
 874				schedule();	/* Try to wait for a moment... */
 875			goto we_slept;
 876		}
 877		dquot = empty;
 878		empty = NULL;
 879		dquot->dq_id = qid;
 880		/* all dquots go on the inuse_list */
 881		put_inuse(dquot);
 882		/* hash it first so it can be found */
 883		insert_dquot_hash(dquot);
 884		spin_unlock(&dq_list_lock);
 885		dqstats_inc(DQST_LOOKUPS);
 886	} else {
 887		if (!atomic_read(&dquot->dq_count))
 888			remove_free_dquot(dquot);
 889		atomic_inc(&dquot->dq_count);
 890		spin_unlock(&dq_list_lock);
 891		dqstats_inc(DQST_CACHE_HITS);
 892		dqstats_inc(DQST_LOOKUPS);
 893	}
 894	/* Wait for dq_lock - after this we know that either dquot_release() is
 895	 * already finished or it will be canceled due to dq_count > 1 test */
 896	wait_on_dquot(dquot);
 897	/* Read the dquot / allocate space in quota file */
 898	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 899		int err;
 900
 901		err = sb->dq_op->acquire_dquot(dquot);
 902		if (err < 0) {
 903			dqput(dquot);
 904			dquot = ERR_PTR(err);
 905			goto out;
 906		}
 907	}
 908	/*
 909	 * Make sure following reads see filled structure - paired with
 910	 * smp_mb__before_atomic() in dquot_acquire().
 911	 */
 912	smp_rmb();
 913#ifdef CONFIG_QUOTA_DEBUG
 914	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
 915#endif
 916out:
 917	if (empty)
 918		do_destroy_dquot(empty);
 919
 920	return dquot;
 921}
 922EXPORT_SYMBOL(dqget);
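/*
 * Illustrative use (a sketch, not from the original file): a typical
 * reference cycle pairs dqget() with dqput():
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(inode->i_uid));
 *
 *	if (IS_ERR(dquot))
 *		return PTR_ERR(dquot);	   (-ESRCH means quotas are off)
 *	...read dquot->dq_dqb under dquot->dq_dqb_lock...
 *	dqput(dquot);
 */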
 923
 924static inline struct dquot **i_dquot(struct inode *inode)
 925{
 926	return inode->i_sb->s_op->get_dquots(inode);
 927}
 928
 929static int dqinit_needed(struct inode *inode, int type)
 930{
 931	struct dquot * const *dquots;
 932	int cnt;
 933
 934	if (IS_NOQUOTA(inode))
 935		return 0;
 936
 937	dquots = i_dquot(inode);
 938	if (type != -1)
 939		return !dquots[type];
 940	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 941		if (!dquots[cnt])
 942			return 1;
 943	return 0;
 944}
 945
 946/* This routine is guarded by s_umount semaphore */
 947static int add_dquot_ref(struct super_block *sb, int type)
 948{
 949	struct inode *inode, *old_inode = NULL;
 950#ifdef CONFIG_QUOTA_DEBUG
 951	int reserved = 0;
 952#endif
 953	int err = 0;
 954
 955	spin_lock(&sb->s_inode_list_lock);
 956	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 957		spin_lock(&inode->i_lock);
 958		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
 959		    !atomic_read(&inode->i_writecount) ||
 960		    !dqinit_needed(inode, type)) {
 961			spin_unlock(&inode->i_lock);
 962			continue;
 963		}
 964		__iget(inode);
 965		spin_unlock(&inode->i_lock);
 966		spin_unlock(&sb->s_inode_list_lock);
 967
 968#ifdef CONFIG_QUOTA_DEBUG
 969		if (unlikely(inode_get_rsv_space(inode) > 0))
 970			reserved = 1;
 971#endif
 972		iput(old_inode);
 973		err = __dquot_initialize(inode, type);
 974		if (err) {
 975			iput(inode);
 976			goto out;
 977		}
 978
 979		/*
 980		 * We hold a reference to 'inode' so it couldn't have been
 981		 * removed from s_inodes list while we dropped the
 982		 * s_inode_list_lock. We cannot iput the inode now as we can be
 983		 * holding the last reference and we cannot iput it under
 984		 * s_inode_list_lock. So we keep the reference and iput it
 985		 * later.
 986		 */
 987		old_inode = inode;
 988		cond_resched();
 989		spin_lock(&sb->s_inode_list_lock);
 990	}
 991	spin_unlock(&sb->s_inode_list_lock);
 992	iput(old_inode);
 993out:
 994#ifdef CONFIG_QUOTA_DEBUG
 995	if (reserved) {
 996		quota_error(sb, "Writes happened before quota was turned on "
 997			"thus quota information is probably inconsistent. "
 998			"Please run quotacheck(8)");
 999	}
1000#endif
1001	return err;
1002}
1003
1004/*
 1005 * Remove references to dquots from the inode and add the dquot to the list
 1006 * for freeing if we hold the last reference to it
1007 */
1008static void remove_inode_dquot_ref(struct inode *inode, int type,
1009				   struct list_head *tofree_head)
1010{
1011	struct dquot **dquots = i_dquot(inode);
1012	struct dquot *dquot = dquots[type];
1013
1014	if (!dquot)
1015		return;
1016
1017	dquots[type] = NULL;
1018	if (list_empty(&dquot->dq_free)) {
1019		/*
1020		 * The inode still has reference to dquot so it can't be in the
1021		 * free list
1022		 */
1023		spin_lock(&dq_list_lock);
1024		list_add(&dquot->dq_free, tofree_head);
1025		spin_unlock(&dq_list_lock);
1026	} else {
1027		/*
1028		 * Dquot is already in a list to put so we won't drop the last
1029		 * reference here.
1030		 */
1031		dqput(dquot);
1032	}
1033}
1034
1035/*
1036 * Free list of dquots
1037 * Dquots are removed from inodes and no new references can be got so we are
1038 * the only ones holding reference
1039 */
1040static void put_dquot_list(struct list_head *tofree_head)
1041{
1042	struct list_head *act_head;
1043	struct dquot *dquot;
1044
1045	act_head = tofree_head->next;
1046	while (act_head != tofree_head) {
1047		dquot = list_entry(act_head, struct dquot, dq_free);
1048		act_head = act_head->next;
1049		/* Remove dquot from the list so we won't have problems... */
1050		list_del_init(&dquot->dq_free);
1051		dqput(dquot);
1052	}
1053}
1054
1055static void remove_dquot_ref(struct super_block *sb, int type,
1056		struct list_head *tofree_head)
1057{
1058	struct inode *inode;
1059#ifdef CONFIG_QUOTA_DEBUG
1060	int reserved = 0;
1061#endif
1062
1063	spin_lock(&sb->s_inode_list_lock);
1064	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1065		/*
 1066		 *  We also have to scan I_NEW inodes because they can already
 1067		 *  have their quota pointers initialized. Luckily, we need to touch
1068		 *  only quota pointers and these have separate locking
1069		 *  (dq_data_lock).
1070		 */
1071		spin_lock(&dq_data_lock);
1072		if (!IS_NOQUOTA(inode)) {
1073#ifdef CONFIG_QUOTA_DEBUG
1074			if (unlikely(inode_get_rsv_space(inode) > 0))
1075				reserved = 1;
1076#endif
1077			remove_inode_dquot_ref(inode, type, tofree_head);
1078		}
1079		spin_unlock(&dq_data_lock);
1080	}
1081	spin_unlock(&sb->s_inode_list_lock);
1082#ifdef CONFIG_QUOTA_DEBUG
1083	if (reserved) {
1084		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1085			" was disabled thus quota information is probably "
1086			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
1087	}
1088#endif
1089}
1090
1091/* Gather all references from inodes and drop them */
1092static void drop_dquot_ref(struct super_block *sb, int type)
1093{
1094	LIST_HEAD(tofree_head);
1095
1096	if (sb->dq_op) {
1097		remove_dquot_ref(sb, type, &tofree_head);
1098		synchronize_srcu(&dquot_srcu);
1099		put_dquot_list(&tofree_head);
1100	}
1101}
1102
1103static inline
1104void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1105{
1106	if (dquot->dq_dqb.dqb_rsvspace >= number)
1107		dquot->dq_dqb.dqb_rsvspace -= number;
1108	else {
1109		WARN_ON_ONCE(1);
1110		dquot->dq_dqb.dqb_rsvspace = 0;
1111	}
1112	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1113	    dquot->dq_dqb.dqb_bsoftlimit)
1114		dquot->dq_dqb.dqb_btime = (time64_t) 0;
1115	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1116}
1117
1118static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1119{
1120	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1121	    dquot->dq_dqb.dqb_curinodes >= number)
1122		dquot->dq_dqb.dqb_curinodes -= number;
1123	else
1124		dquot->dq_dqb.dqb_curinodes = 0;
1125	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1126		dquot->dq_dqb.dqb_itime = (time64_t) 0;
1127	clear_bit(DQ_INODES_B, &dquot->dq_flags);
1128}
1129
1130static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1131{
1132	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1133	    dquot->dq_dqb.dqb_curspace >= number)
1134		dquot->dq_dqb.dqb_curspace -= number;
1135	else
1136		dquot->dq_dqb.dqb_curspace = 0;
1137	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1138	    dquot->dq_dqb.dqb_bsoftlimit)
1139		dquot->dq_dqb.dqb_btime = (time64_t) 0;
1140	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1141}
1142
1143struct dquot_warn {
1144	struct super_block *w_sb;
1145	struct kqid w_dq_id;
1146	short w_type;
1147};
1148
1149static int warning_issued(struct dquot *dquot, const int warntype)
1150{
1151	int flag = (warntype == QUOTA_NL_BHARDWARN ||
1152		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1153		((warntype == QUOTA_NL_IHARDWARN ||
1154		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1155
1156	if (!flag)
1157		return 0;
1158	return test_and_set_bit(flag, &dquot->dq_flags);
1159}
1160
1161#ifdef CONFIG_PRINT_QUOTA_WARNING
1162static int flag_print_warnings = 1;
1163
1164static int need_print_warning(struct dquot_warn *warn)
1165{
1166	if (!flag_print_warnings)
1167		return 0;
1168
1169	switch (warn->w_dq_id.type) {
1170		case USRQUOTA:
1171			return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1172		case GRPQUOTA:
1173			return in_group_p(warn->w_dq_id.gid);
1174		case PRJQUOTA:
1175			return 1;
1176	}
1177	return 0;
1178}
1179
1180/* Print warning to user which exceeded quota */
1181static void print_warning(struct dquot_warn *warn)
1182{
1183	char *msg = NULL;
1184	struct tty_struct *tty;
1185	int warntype = warn->w_type;
1186
1187	if (warntype == QUOTA_NL_IHARDBELOW ||
1188	    warntype == QUOTA_NL_ISOFTBELOW ||
1189	    warntype == QUOTA_NL_BHARDBELOW ||
1190	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1191		return;
1192
1193	tty = get_current_tty();
1194	if (!tty)
1195		return;
1196	tty_write_message(tty, warn->w_sb->s_id);
1197	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1198		tty_write_message(tty, ": warning, ");
1199	else
1200		tty_write_message(tty, ": write failed, ");
1201	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1202	switch (warntype) {
1203		case QUOTA_NL_IHARDWARN:
1204			msg = " file limit reached.\r\n";
1205			break;
1206		case QUOTA_NL_ISOFTLONGWARN:
1207			msg = " file quota exceeded too long.\r\n";
1208			break;
1209		case QUOTA_NL_ISOFTWARN:
1210			msg = " file quota exceeded.\r\n";
1211			break;
1212		case QUOTA_NL_BHARDWARN:
1213			msg = " block limit reached.\r\n";
1214			break;
1215		case QUOTA_NL_BSOFTLONGWARN:
1216			msg = " block quota exceeded too long.\r\n";
1217			break;
1218		case QUOTA_NL_BSOFTWARN:
1219			msg = " block quota exceeded.\r\n";
1220			break;
1221	}
1222	tty_write_message(tty, msg);
1223	tty_kref_put(tty);
1224}
1225#endif
1226
1227static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1228			    int warntype)
1229{
1230	if (warning_issued(dquot, warntype))
1231		return;
1232	warn->w_type = warntype;
1233	warn->w_sb = dquot->dq_sb;
1234	warn->w_dq_id = dquot->dq_id;
1235}
1236
1237/*
1238 * Write warnings to the console and send warning messages over netlink.
1239 *
1240 * Note that this function can call into tty and networking code.
1241 */
1242static void flush_warnings(struct dquot_warn *warn)
1243{
1244	int i;
1245
1246	for (i = 0; i < MAXQUOTAS; i++) {
1247		if (warn[i].w_type == QUOTA_NL_NOWARN)
1248			continue;
1249#ifdef CONFIG_PRINT_QUOTA_WARNING
1250		print_warning(&warn[i]);
1251#endif
1252		quota_send_warning(warn[i].w_dq_id,
1253				   warn[i].w_sb->s_dev, warn[i].w_type);
1254	}
1255}
1256
1257static int ignore_hardlimit(struct dquot *dquot)
1258{
1259	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1260
1261	return capable(CAP_SYS_RESOURCE) &&
1262	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1263		!(info->dqi_flags & DQF_ROOT_SQUASH));
1264}
1265
1266static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
1267			    struct dquot_warn *warn)
1268{
1269	qsize_t newinodes;
1270	int ret = 0;
1271
1272	spin_lock(&dquot->dq_dqb_lock);
1273	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1274	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1275	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
1276		goto add;
1277
1278	if (dquot->dq_dqb.dqb_ihardlimit &&
1279	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1280            !ignore_hardlimit(dquot)) {
1281		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1282		ret = -EDQUOT;
1283		goto out;
1284	}
1285
1286	if (dquot->dq_dqb.dqb_isoftlimit &&
1287	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1288	    dquot->dq_dqb.dqb_itime &&
1289	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1290            !ignore_hardlimit(dquot)) {
1291		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1292		ret = -EDQUOT;
1293		goto out;
1294	}
1295
1296	if (dquot->dq_dqb.dqb_isoftlimit &&
1297	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1298	    dquot->dq_dqb.dqb_itime == 0) {
1299		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1300		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1301		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1302	}
1303add:
1304	dquot->dq_dqb.dqb_curinodes = newinodes;
1305
1306out:
1307	spin_unlock(&dquot->dq_dqb_lock);
1308	return ret;
1309}
1310
1311static int dquot_add_space(struct dquot *dquot, qsize_t space,
1312			   qsize_t rsv_space, unsigned int flags,
1313			   struct dquot_warn *warn)
1314{
1315	qsize_t tspace;
1316	struct super_block *sb = dquot->dq_sb;
1317	int ret = 0;
1318
1319	spin_lock(&dquot->dq_dqb_lock);
1320	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1321	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
1322		goto finish;
1323
1324	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1325		+ space + rsv_space;
1326
1327	if (dquot->dq_dqb.dqb_bhardlimit &&
1328	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
1329            !ignore_hardlimit(dquot)) {
1330		if (flags & DQUOT_SPACE_WARN)
1331			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1332		ret = -EDQUOT;
1333		goto finish;
1334	}
1335
1336	if (dquot->dq_dqb.dqb_bsoftlimit &&
1337	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1338	    dquot->dq_dqb.dqb_btime &&
1339	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1340            !ignore_hardlimit(dquot)) {
1341		if (flags & DQUOT_SPACE_WARN)
1342			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1343		ret = -EDQUOT;
1344		goto finish;
1345	}
1346
1347	if (dquot->dq_dqb.dqb_bsoftlimit &&
1348	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1349	    dquot->dq_dqb.dqb_btime == 0) {
1350		if (flags & DQUOT_SPACE_WARN) {
1351			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1352			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1353			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1354		} else {
 1355			/*
 1356			 * We don't allow preallocation to exceed the softlimit,
 1357			 * so exceeding it will always be reported
 1358			 */
1359			ret = -EDQUOT;
1360			goto finish;
1361		}
1362	}
1363finish:
1364	/*
1365	 * We have to be careful and go through warning generation & grace time
1366	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1367	 * only here...
1368	 */
1369	if (flags & DQUOT_SPACE_NOFAIL)
1370		ret = 0;
1371	if (!ret) {
1372		dquot->dq_dqb.dqb_rsvspace += rsv_space;
1373		dquot->dq_dqb.dqb_curspace += space;
1374	}
1375	spin_unlock(&dquot->dq_dqb_lock);
1376	return ret;
1377}
1378
1379static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1380{
1381	qsize_t newinodes;
1382
1383	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1384	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1385	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1386		return QUOTA_NL_NOWARN;
1387
1388	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1389	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1390		return QUOTA_NL_ISOFTBELOW;
1391	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1392	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
1393		return QUOTA_NL_IHARDBELOW;
1394	return QUOTA_NL_NOWARN;
1395}
1396
1397static int info_bdq_free(struct dquot *dquot, qsize_t space)
1398{
1399	qsize_t tspace;
1400
1401	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1402
1403	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1404	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1405		return QUOTA_NL_NOWARN;
1406
1407	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1408		return QUOTA_NL_BSOFTBELOW;
1409	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1410	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1411		return QUOTA_NL_BHARDBELOW;
1412	return QUOTA_NL_NOWARN;
1413}
1414
1415static int dquot_active(const struct inode *inode)
1416{
1417	struct super_block *sb = inode->i_sb;
1418
1419	if (IS_NOQUOTA(inode))
1420		return 0;
1421	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1422}
1423
1424/*
1425 * Initialize quota pointers in inode
1426 *
 1427 * It is better to call this function outside of any transaction, as it
 1428 * might need a lot of journal space for dquot structure allocation.
1429 */
1430static int __dquot_initialize(struct inode *inode, int type)
1431{
1432	int cnt, init_needed = 0;
1433	struct dquot **dquots, *got[MAXQUOTAS] = {};
1434	struct super_block *sb = inode->i_sb;
1435	qsize_t rsv;
1436	int ret = 0;
1437
1438	if (!dquot_active(inode))
1439		return 0;
1440
1441	dquots = i_dquot(inode);
1442
1443	/* First get references to structures we might need. */
1444	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1445		struct kqid qid;
1446		kprojid_t projid;
1447		int rc;
1448		struct dquot *dquot;
1449
1450		if (type != -1 && cnt != type)
1451			continue;
1452		/*
1453		 * The i_dquot should have been initialized in most cases,
1454		 * we check it without locking here to avoid unnecessary
1455		 * dqget()/dqput() calls.
1456		 */
1457		if (dquots[cnt])
1458			continue;
1459
1460		if (!sb_has_quota_active(sb, cnt))
1461			continue;
1462
1463		init_needed = 1;
1464
1465		switch (cnt) {
1466		case USRQUOTA:
1467			qid = make_kqid_uid(inode->i_uid);
1468			break;
1469		case GRPQUOTA:
1470			qid = make_kqid_gid(inode->i_gid);
1471			break;
1472		case PRJQUOTA:
1473			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1474			if (rc)
1475				continue;
1476			qid = make_kqid_projid(projid);
1477			break;
1478		}
1479		dquot = dqget(sb, qid);
1480		if (IS_ERR(dquot)) {
1481			/* We raced with somebody turning quotas off... */
1482			if (PTR_ERR(dquot) != -ESRCH) {
1483				ret = PTR_ERR(dquot);
1484				goto out_put;
1485			}
1486			dquot = NULL;
1487		}
1488		got[cnt] = dquot;
1489	}
1490
 1491	/* All required i_dquot pointers have been initialized */
1492	if (!init_needed)
1493		return 0;
1494
1495	spin_lock(&dq_data_lock);
1496	if (IS_NOQUOTA(inode))
1497		goto out_lock;
1498	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1499		if (type != -1 && cnt != type)
1500			continue;
1501		/* Avoid races with quotaoff() */
1502		if (!sb_has_quota_active(sb, cnt))
1503			continue;
1504		/* We could race with quotaon or dqget() could have failed */
1505		if (!got[cnt])
1506			continue;
1507		if (!dquots[cnt]) {
1508			dquots[cnt] = got[cnt];
1509			got[cnt] = NULL;
1510			/*
1511			 * Make quota reservation system happy if someone
1512			 * did a write before quota was turned on
1513			 */
1514			rsv = inode_get_rsv_space(inode);
1515			if (unlikely(rsv)) {
1516				spin_lock(&inode->i_lock);
1517				/* Get reservation again under proper lock */
1518				rsv = __inode_get_rsv_space(inode);
1519				spin_lock(&dquots[cnt]->dq_dqb_lock);
1520				dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
1521				spin_unlock(&dquots[cnt]->dq_dqb_lock);
1522				spin_unlock(&inode->i_lock);
1523			}
1524		}
1525	}
1526out_lock:
1527	spin_unlock(&dq_data_lock);
1528out_put:
1529	/* Drop unused references */
1530	dqput_all(got);
1531
1532	return ret;
1533}
1534
1535int dquot_initialize(struct inode *inode)
1536{
1537	return __dquot_initialize(inode, -1);
1538}
1539EXPORT_SYMBOL(dquot_initialize);
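/*
 * Illustrative sketch: filesystems call this before operations that may
 * charge quota, e.g. at the start of their create/unlink/truncate paths:
 *
 *	err = dquot_initialize(inode);
 *	if (err)
 *		return err;
 */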
1540
1541bool dquot_initialize_needed(struct inode *inode)
1542{
1543	struct dquot **dquots;
1544	int i;
1545
1546	if (!dquot_active(inode))
1547		return false;
1548
1549	dquots = i_dquot(inode);
1550	for (i = 0; i < MAXQUOTAS; i++)
1551		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1552			return true;
1553	return false;
1554}
1555EXPORT_SYMBOL(dquot_initialize_needed);
1556
1557/*
1558 * Release all quotas referenced by inode.
1559 *
 1560 * This function should only be called on inode free or when converting
 1561 * a file to a quota file; in both cases there are no other users of the
 1562 * i_dquot pointers, so we needn't call synchronize_srcu() after
 1563 * clearing i_dquot.
1564 */
1565static void __dquot_drop(struct inode *inode)
1566{
1567	int cnt;
1568	struct dquot **dquots = i_dquot(inode);
1569	struct dquot *put[MAXQUOTAS];
1570
1571	spin_lock(&dq_data_lock);
1572	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1573		put[cnt] = dquots[cnt];
1574		dquots[cnt] = NULL;
1575	}
1576	spin_unlock(&dq_data_lock);
1577	dqput_all(put);
1578}
1579
1580void dquot_drop(struct inode *inode)
1581{
1582	struct dquot * const *dquots;
1583	int cnt;
1584
1585	if (IS_NOQUOTA(inode))
1586		return;
1587
1588	/*
1589	 * Test before calling to rule out calls from proc and such
1590	 * where we are not allowed to block. Note that this is
1591	 * actually reliable test even without the lock - the caller
1592	 * must assure that nobody can come after the DQUOT_DROP and
1593	 * add quota pointers back anyway.
1594	 */
1595	dquots = i_dquot(inode);
1596	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1597		if (dquots[cnt])
1598			break;
1599	}
1600
1601	if (cnt < MAXQUOTAS)
1602		__dquot_drop(inode);
1603}
1604EXPORT_SYMBOL(dquot_drop);
1605
1606/*
1607 * inode_reserved_space is managed internally by quota, and protected by
1608 * i_lock similar to i_blocks+i_bytes.
1609 */
1610static qsize_t *inode_reserved_space(struct inode * inode)
1611{
 1612	/* A filesystem must explicitly define its own method in order to use
 1613	 * the quota reservation interface */
1614	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1615	return inode->i_sb->dq_op->get_reserved_space(inode);
1616}
1617
1618static qsize_t __inode_get_rsv_space(struct inode *inode)
1619{
1620	if (!inode->i_sb->dq_op->get_reserved_space)
1621		return 0;
1622	return *inode_reserved_space(inode);
1623}
1624
1625static qsize_t inode_get_rsv_space(struct inode *inode)
1626{
1627	qsize_t ret;
1628
1629	if (!inode->i_sb->dq_op->get_reserved_space)
1630		return 0;
1631	spin_lock(&inode->i_lock);
1632	ret = __inode_get_rsv_space(inode);
1633	spin_unlock(&inode->i_lock);
1634	return ret;
1635}
1636
1637/*
 1638 * This function updates the i_blocks+i_bytes fields and quota information
1639 * (together with appropriate checks).
1640 *
1641 * NOTE: We absolutely rely on the fact that caller dirties the inode
1642 * (usually helpers in quotaops.h care about this) and holds a handle for
1643 * the current transaction so that dquot write and inode write go into the
1644 * same transaction.
1645 */
1646
1647/*
1648 * This operation can block, but only after everything is updated
1649 */
1650int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1651{
1652	int cnt, ret = 0, index;
1653	struct dquot_warn warn[MAXQUOTAS];
1654	int reserve = flags & DQUOT_SPACE_RESERVE;
1655	struct dquot **dquots;
1656
1657	if (!dquot_active(inode)) {
1658		if (reserve) {
1659			spin_lock(&inode->i_lock);
1660			*inode_reserved_space(inode) += number;
1661			spin_unlock(&inode->i_lock);
1662		} else {
1663			inode_add_bytes(inode, number);
1664		}
1665		goto out;
1666	}
1667
1668	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1669		warn[cnt].w_type = QUOTA_NL_NOWARN;
1670
1671	dquots = i_dquot(inode);
1672	index = srcu_read_lock(&dquot_srcu);
1673	spin_lock(&inode->i_lock);
1674	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1675		if (!dquots[cnt])
1676			continue;
1677		if (reserve) {
1678			ret = dquot_add_space(dquots[cnt], 0, number, flags,
1679					      &warn[cnt]);
1680		} else {
1681			ret = dquot_add_space(dquots[cnt], number, 0, flags,
1682					      &warn[cnt]);
1683		}
1684		if (ret) {
1685			/* Back out changes we already did */
1686			for (cnt--; cnt >= 0; cnt--) {
1687				if (!dquots[cnt])
1688					continue;
1689				spin_lock(&dquots[cnt]->dq_dqb_lock);
1690				if (reserve)
1691					dquot_free_reserved_space(dquots[cnt],
1692								  number);
1693				else
1694					dquot_decr_space(dquots[cnt], number);
1695				spin_unlock(&dquots[cnt]->dq_dqb_lock);
1696			}
1697			spin_unlock(&inode->i_lock);
1698			goto out_flush_warn;
1699		}
1700	}
1701	if (reserve)
1702		*inode_reserved_space(inode) += number;
1703	else
1704		__inode_add_bytes(inode, number);
1705	spin_unlock(&inode->i_lock);
1706
1707	if (reserve)
1708		goto out_flush_warn;
1709	mark_all_dquot_dirty(dquots);
1710out_flush_warn:
1711	srcu_read_unlock(&dquot_srcu, index);
1712	flush_warnings(warn);
1713out:
1714	return ret;
1715}
1716EXPORT_SYMBOL(__dquot_alloc_space);
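/*
 * A hedged sketch: filesystems usually reach this through the
 * <linux/quotaops.h> helpers rather than calling it directly, e.g.:
 *
 *	ret = dquot_alloc_block(inode, nr);	   charge nr blocks now
 *	ret = dquot_reserve_block(inode, nr);	   or just reserve them
 *
 * both of which boil down to __dquot_alloc_space() with the appropriate
 * DQUOT_SPACE_* flags.
 */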
1717
1718/*
1719 * This operation can block, but only after everything is updated
1720 */
1721int dquot_alloc_inode(struct inode *inode)
1722{
1723	int cnt, ret = 0, index;
1724	struct dquot_warn warn[MAXQUOTAS];
1725	struct dquot * const *dquots;
1726
1727	if (!dquot_active(inode))
1728		return 0;
1729	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1730		warn[cnt].w_type = QUOTA_NL_NOWARN;
1731
1732	dquots = i_dquot(inode);
1733	index = srcu_read_lock(&dquot_srcu);
1734	spin_lock(&inode->i_lock);
1735	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1736		if (!dquots[cnt])
1737			continue;
1738		ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
1739		if (ret) {
1740			for (cnt--; cnt >= 0; cnt--) {
1741				if (!dquots[cnt])
1742					continue;
1743				/* Back out changes we already did */
1744				spin_lock(&dquots[cnt]->dq_dqb_lock);
1745				dquot_decr_inodes(dquots[cnt], 1);
1746				spin_unlock(&dquots[cnt]->dq_dqb_lock);
1747			}
1748			goto warn_put_all;
1749		}
1750	}
1751
1752warn_put_all:
1753	spin_unlock(&inode->i_lock);
1754	if (ret == 0)
1755		mark_all_dquot_dirty(dquots);
1756	srcu_read_unlock(&dquot_srcu, index);
1757	flush_warnings(warn);
1758	return ret;
1759}
1760EXPORT_SYMBOL(dquot_alloc_inode);
1761
1762/*
1763 * Convert in-memory reserved quotas to real consumed quotas
1764 */
1765int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1766{
1767	struct dquot **dquots;
1768	int cnt, index;
1769
1770	if (!dquot_active(inode)) {
1771		spin_lock(&inode->i_lock);
1772		*inode_reserved_space(inode) -= number;
1773		__inode_add_bytes(inode, number);
1774		spin_unlock(&inode->i_lock);
1775		return 0;
1776	}
1777
1778	dquots = i_dquot(inode);
1779	index = srcu_read_lock(&dquot_srcu);
1780	spin_lock(&inode->i_lock);
1781	/* Claim reserved quotas to allocated quotas */
1782	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1783		if (dquots[cnt]) {
1784			struct dquot *dquot = dquots[cnt];
1785
1786			spin_lock(&dquot->dq_dqb_lock);
1787			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1788				number = dquot->dq_dqb.dqb_rsvspace;
1789			dquot->dq_dqb.dqb_curspace += number;
1790			dquot->dq_dqb.dqb_rsvspace -= number;
1791			spin_unlock(&dquot->dq_dqb_lock);
1792		}
1793	}
1794	/* Update inode bytes */
1795	*inode_reserved_space(inode) -= number;
1796	__inode_add_bytes(inode, number);
1797	spin_unlock(&inode->i_lock);
1798	mark_all_dquot_dirty(dquots);
1799	srcu_read_unlock(&dquot_srcu, index);
1800	return 0;
1801}
1802EXPORT_SYMBOL(dquot_claim_space_nodirty);
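/*
 * A hedged sketch of the delayed-allocation flow these helpers support
 * (all names from <linux/quotaops.h>):
 *
 *	dquot_reserve_block(inode, nr);		   at write time: reserve
 *	...blocks are really allocated at writeback...
 *	dquot_claim_block(inode, nr);		   success: rsv -> curspace
 * or
 *	dquot_release_reservation_block(inode, nr);   failure: undo
 */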
1803
1804/*
1805 * Convert allocated space back to in-memory reserved quotas
1806 */
1807void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1808{
1809	struct dquot **dquots;
1810	int cnt, index;
1811
1812	if (!dquot_active(inode)) {
1813		spin_lock(&inode->i_lock);
1814		*inode_reserved_space(inode) += number;
1815		__inode_sub_bytes(inode, number);
1816		spin_unlock(&inode->i_lock);
1817		return;
1818	}
1819
1820	dquots = i_dquot(inode);
1821	index = srcu_read_lock(&dquot_srcu);
1822	spin_lock(&inode->i_lock);
 1823	/* Convert allocated space back to in-memory reserved quotas */
1824	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1825		if (dquots[cnt]) {
1826			struct dquot *dquot = dquots[cnt];
1827
1828			spin_lock(&dquot->dq_dqb_lock);
1829			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1830				number = dquot->dq_dqb.dqb_curspace;
1831			dquot->dq_dqb.dqb_rsvspace += number;
1832			dquot->dq_dqb.dqb_curspace -= number;
1833			spin_unlock(&dquot->dq_dqb_lock);
1834		}
1835	}
1836	/* Update inode bytes */
1837	*inode_reserved_space(inode) += number;
1838	__inode_sub_bytes(inode, number);
1839	spin_unlock(&inode->i_lock);
1840	mark_all_dquot_dirty(dquots);
1841	srcu_read_unlock(&dquot_srcu, index);
1842	return;
1843}
1844EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1845
1846/*
1847 * This operation can block, but only after everything is updated
1848 */
1849void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1850{
1851	unsigned int cnt;
1852	struct dquot_warn warn[MAXQUOTAS];
1853	struct dquot **dquots;
1854	int reserve = flags & DQUOT_SPACE_RESERVE, index;
1855
1856	if (!dquot_active(inode)) {
1857		if (reserve) {
1858			spin_lock(&inode->i_lock);
1859			*inode_reserved_space(inode) -= number;
1860			spin_unlock(&inode->i_lock);
1861		} else {
1862			inode_sub_bytes(inode, number);
1863		}
1864		return;
1865	}
1866
1867	dquots = i_dquot(inode);
1868	index = srcu_read_lock(&dquot_srcu);
1869	spin_lock(&inode->i_lock);
1870	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1871		int wtype;
1872
1873		warn[cnt].w_type = QUOTA_NL_NOWARN;
1874		if (!dquots[cnt])
1875			continue;
1876		spin_lock(&dquots[cnt]->dq_dqb_lock);
1877		wtype = info_bdq_free(dquots[cnt], number);
1878		if (wtype != QUOTA_NL_NOWARN)
1879			prepare_warning(&warn[cnt], dquots[cnt], wtype);
1880		if (reserve)
1881			dquot_free_reserved_space(dquots[cnt], number);
1882		else
1883			dquot_decr_space(dquots[cnt], number);
1884		spin_unlock(&dquots[cnt]->dq_dqb_lock);
1885	}
1886	if (reserve)
1887		*inode_reserved_space(inode) -= number;
1888	else
1889		__inode_sub_bytes(inode, number);
1890	spin_unlock(&inode->i_lock);
1891
1892	if (reserve)
1893		goto out_unlock;
1894	mark_all_dquot_dirty(dquots);
1895out_unlock:
1896	srcu_read_unlock(&dquot_srcu, index);
1897	flush_warnings(warn);
1898}
1899EXPORT_SYMBOL(__dquot_free_space);
1900
1901/*
1902 * This operation can block, but only after everything is updated
1903 */
1904void dquot_free_inode(struct inode *inode)
1905{
1906	unsigned int cnt;
1907	struct dquot_warn warn[MAXQUOTAS];
1908	struct dquot * const *dquots;
1909	int index;
1910
1911	if (!dquot_active(inode))
1912		return;
1913
1914	dquots = i_dquot(inode);
1915	index = srcu_read_lock(&dquot_srcu);
1916	spin_lock(&inode->i_lock);
1917	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1918		int wtype;
1919
1920		warn[cnt].w_type = QUOTA_NL_NOWARN;
1921		if (!dquots[cnt])
1922			continue;
1923		spin_lock(&dquots[cnt]->dq_dqb_lock);
1924		wtype = info_idq_free(dquots[cnt], 1);
1925		if (wtype != QUOTA_NL_NOWARN)
1926			prepare_warning(&warn[cnt], dquots[cnt], wtype);
1927		dquot_decr_inodes(dquots[cnt], 1);
1928		spin_unlock(&dquots[cnt]->dq_dqb_lock);
1929	}
1930	spin_unlock(&inode->i_lock);
1931	mark_all_dquot_dirty(dquots);
1932	srcu_read_unlock(&dquot_srcu, index);
1933	flush_warnings(warn);
1934}
1935EXPORT_SYMBOL(dquot_free_inode);
1936
1937/*
1938 * Transfer the number of inodes and blocks from one diskquota to another.
1939 * On success, dquot references in transfer_to are consumed and references
1940 * to original dquots that need to be released are placed there. On failure,
1941 * references are kept untouched.
1942 *
1943 * This operation can block, but only after everything is updated
1944 * A transaction must be started when entering this function.
1945 *
1946 * We are holding reference on transfer_from & transfer_to, no need to
1947 * protect them by srcu_read_lock().
1948 */
1949int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1950{
1951	qsize_t cur_space;
1952	qsize_t rsv_space = 0;
1953	qsize_t inode_usage = 1;
1954	struct dquot *transfer_from[MAXQUOTAS] = {};
1955	int cnt, ret = 0;
1956	char is_valid[MAXQUOTAS] = {};
1957	struct dquot_warn warn_to[MAXQUOTAS];
1958	struct dquot_warn warn_from_inodes[MAXQUOTAS];
1959	struct dquot_warn warn_from_space[MAXQUOTAS];
1960
1961	if (IS_NOQUOTA(inode))
1962		return 0;
1963
1964	if (inode->i_sb->dq_op->get_inode_usage) {
1965		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
1966		if (ret)
1967			return ret;
1968	}
1969
1970	/* Initialize the arrays */
1971	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1972		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1973		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1974		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1975	}
1976
1977	spin_lock(&dq_data_lock);
1978	spin_lock(&inode->i_lock);
1979	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
1980		spin_unlock(&inode->i_lock);
1981		spin_unlock(&dq_data_lock);
1982		return 0;
1983	}
1984	cur_space = __inode_get_bytes(inode);
1985	rsv_space = __inode_get_rsv_space(inode);
1986	/*
1987	 * Build the transfer_from list, check limits, and update usage in
1988	 * the target structures.
1989	 */
1990	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1991		/*
1992		 * Skip changes for the same uid or gid, or for a quota type that is turned off.
1993		 */
1994		if (!transfer_to[cnt])
1995			continue;
1996		/* Avoid races with quotaoff() */
1997		if (!sb_has_quota_active(inode->i_sb, cnt))
1998			continue;
1999		is_valid[cnt] = 1;
2000		transfer_from[cnt] = i_dquot(inode)[cnt];
2001		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
2002				       &warn_to[cnt]);
2003		if (ret)
2004			goto over_quota;
2005		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
2006				      DQUOT_SPACE_WARN, &warn_to[cnt]);
2007		if (ret) {
2008			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2009			dquot_decr_inodes(transfer_to[cnt], inode_usage);
2010			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2011			goto over_quota;
2012		}
2013	}
2014
2015	/* Decrease usage for source structures and update quota pointers */
2016	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2017		if (!is_valid[cnt])
2018			continue;
2019		/* Due to an IO error we might not have a transfer_from[] structure */
2020		if (transfer_from[cnt]) {
2021			int wtype;
2022
2023			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2024			wtype = info_idq_free(transfer_from[cnt], inode_usage);
2025			if (wtype != QUOTA_NL_NOWARN)
2026				prepare_warning(&warn_from_inodes[cnt],
2027						transfer_from[cnt], wtype);
2028			wtype = info_bdq_free(transfer_from[cnt],
2029					      cur_space + rsv_space);
2030			if (wtype != QUOTA_NL_NOWARN)
2031				prepare_warning(&warn_from_space[cnt],
2032						transfer_from[cnt], wtype);
2033			dquot_decr_inodes(transfer_from[cnt], inode_usage);
2034			dquot_decr_space(transfer_from[cnt], cur_space);
2035			dquot_free_reserved_space(transfer_from[cnt],
2036						  rsv_space);
2037			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2038		}
2039		i_dquot(inode)[cnt] = transfer_to[cnt];
2040	}
2041	spin_unlock(&inode->i_lock);
2042	spin_unlock(&dq_data_lock);
2043
2044	mark_all_dquot_dirty(transfer_from);
2045	mark_all_dquot_dirty(transfer_to);
2046	flush_warnings(warn_to);
2047	flush_warnings(warn_from_inodes);
2048	flush_warnings(warn_from_space);
2049	/* Pass back references to put */
2050	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2051		if (is_valid[cnt])
2052			transfer_to[cnt] = transfer_from[cnt];
2053	return 0;
2054over_quota:
2055	/* Back out changes we already did */
2056	for (cnt--; cnt >= 0; cnt--) {
2057		if (!is_valid[cnt])
2058			continue;
2059		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2060		dquot_decr_inodes(transfer_to[cnt], inode_usage);
2061		dquot_decr_space(transfer_to[cnt], cur_space);
2062		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2063		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2064	}
2065	spin_unlock(&inode->i_lock);
2066	spin_unlock(&dq_data_lock);
2067	flush_warnings(warn_to);
2068	return ret;
2069}
2070EXPORT_SYMBOL(__dquot_transfer);
2071
2072/* Wrapper for transferring ownership of an inode for uid/gid only
2073 * Called from FSXXX_setattr()
2074 */
2075int dquot_transfer(struct inode *inode, struct iattr *iattr)
2076{
2077	struct dquot *transfer_to[MAXQUOTAS] = {};
2078	struct dquot *dquot;
2079	struct super_block *sb = inode->i_sb;
2080	int ret;
2081
2082	if (!dquot_active(inode))
2083		return 0;
2084
2085	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
2086		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
2087		if (IS_ERR(dquot)) {
2088			if (PTR_ERR(dquot) != -ESRCH) {
2089				ret = PTR_ERR(dquot);
2090				goto out_put;
2091			}
2092			dquot = NULL;
2093		}
2094		transfer_to[USRQUOTA] = dquot;
2095	}
2096	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
2097		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
2098		if (IS_ERR(dquot)) {
2099			if (PTR_ERR(dquot) != -ESRCH) {
2100				ret = PTR_ERR(dquot);
2101				goto out_put;
2102			}
2103			dquot = NULL;
2104		}
2105		transfer_to[GRPQUOTA] = dquot;
2106	}
2107	ret = __dquot_transfer(inode, transfer_to);
2108out_put:
2109	dqput_all(transfer_to);
2110	return ret;
2111}
2112EXPORT_SYMBOL(dquot_transfer);
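/*
 * Sketch of the typical caller (closely following what simple disk
 * filesystems do in ->setattr; the myfs name is hypothetical): ownership
 * must be transferred before the new uid/gid is copied into the inode.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_initialize(inode);
		if (error)
			return error;
		error = dquot_transfer(inode, iattr);
		if (error)
			return error;
	}
	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}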
2113
2114/*
2115 * Write info of quota file to disk
2116 */
2117int dquot_commit_info(struct super_block *sb, int type)
2118{
2119	struct quota_info *dqopt = sb_dqopt(sb);
2120
2121	return dqopt->ops[type]->write_file_info(sb, type);
2122}
2123EXPORT_SYMBOL(dquot_commit_info);
2124
2125int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2126{
2127	struct quota_info *dqopt = sb_dqopt(sb);
2128
2129	if (!sb_has_quota_active(sb, qid->type))
2130		return -ESRCH;
2131	if (!dqopt->ops[qid->type]->get_next_id)
2132		return -ENOSYS;
2133	return dqopt->ops[qid->type]->get_next_id(sb, qid);
2134}
2135EXPORT_SYMBOL(dquot_get_next_id);
2136
2137/*
2138 * Definitions of diskquota operations.
2139 */
2140const struct dquot_operations dquot_operations = {
2141	.write_dquot	= dquot_commit,
2142	.acquire_dquot	= dquot_acquire,
2143	.release_dquot	= dquot_release,
2144	.mark_dirty	= dquot_mark_dquot_dirty,
2145	.write_info	= dquot_commit_info,
2146	.alloc_dquot	= dquot_alloc,
2147	.destroy_dquot	= dquot_destroy,
2148	.get_next_id	= dquot_get_next_id,
2149};
2150EXPORT_SYMBOL(dquot_operations);
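/*
 * Sketch: a journaling filesystem usually copies this table and overrides
 * the methods that must run inside a transaction.  Every myfs_* symbol
 * below is hypothetical; the rest are the generic helpers from this file.
 */
static const struct dquot_operations myfs_quota_operations = {
	.write_dquot	= myfs_write_dquot,	/* dquot_commit() under a transaction */
	.acquire_dquot	= myfs_acquire_dquot,	/* dquot_acquire() under a transaction */
	.release_dquot	= myfs_release_dquot,	/* dquot_release() under a transaction */
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= myfs_write_info,	/* dquot_commit_info() under a transaction */
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};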
2151
2152/*
2153 * Generic helper for ->open on filesystems supporting disk quotas.
2154 */
2155int dquot_file_open(struct inode *inode, struct file *file)
2156{
2157	int error;
2158
2159	error = generic_file_open(inode, file);
2160	if (!error && (file->f_mode & FMODE_WRITE))
2161		error = dquot_initialize(inode);
2162	return error;
2163}
2164EXPORT_SYMBOL(dquot_file_open);
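/*
 * Sketch: wiring the helper into a filesystem's file_operations so quota
 * structures are attached before the first write (myfs name hypothetical).
 */
const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.open		= dquot_file_open,	/* instead of generic_file_open */
	.fsync		= generic_file_fsync,
};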
2165
2166static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
2167{
2168	struct quota_info *dqopt = sb_dqopt(sb);
2169	struct inode *inode = dqopt->files[type];
2170
2171	if (!inode)
2172		return;
2173	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2174		inode_lock(inode);
2175		inode->i_flags &= ~S_NOQUOTA;
2176		inode_unlock(inode);
2177	}
2178	dqopt->files[type] = NULL;
2179	iput(inode);
2180}
2181
2182/*
2183 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2184 */
2185int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2186{
2187	int cnt;
2188	struct quota_info *dqopt = sb_dqopt(sb);
2189
2190	/* s_umount should be held in exclusive mode */
2191	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2192		up_read(&sb->s_umount);
2193
2194	/* Cannot turn off usage accounting without also turning off limits,
2195	 * and cannot suspend quotas while simultaneously turning them off. */
2196	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2197	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2198	    DQUOT_USAGE_ENABLED)))
2199		return -EINVAL;
2200
2201	/*
2202	 * Skip everything if there's nothing to do. We have to do this because
2203	 * sometimes we are called when fill_super() failed and calling
2204	 * sync_fs() in such cases does no good.
2205	 */
2206	if (!sb_any_quota_loaded(sb))
2207		return 0;
2208
2209	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2210		if (type != -1 && cnt != type)
2211			continue;
2212		if (!sb_has_quota_loaded(sb, cnt))
2213			continue;
2214
2215		if (flags & DQUOT_SUSPENDED) {
2216			spin_lock(&dq_state_lock);
2217			dqopt->flags |=
2218				dquot_state_flag(DQUOT_SUSPENDED, cnt);
2219			spin_unlock(&dq_state_lock);
2220		} else {
2221			spin_lock(&dq_state_lock);
2222			dqopt->flags &= ~dquot_state_flag(flags, cnt);
2223			/* Turning off suspended quotas? */
2224			if (!sb_has_quota_loaded(sb, cnt) &&
2225			    sb_has_quota_suspended(sb, cnt)) {
2226				dqopt->flags &=	~dquot_state_flag(
2227							DQUOT_SUSPENDED, cnt);
2228				spin_unlock(&dq_state_lock);
2229				vfs_cleanup_quota_inode(sb, cnt);
2230				continue;
2231			}
2232			spin_unlock(&dq_state_lock);
2233		}
2234
2235		/* Do we still have to keep quota loaded? */
2236		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2237			continue;
2238
2239		/* Note: these are blocking operations */
2240		drop_dquot_ref(sb, cnt);
2241		invalidate_dquots(sb, cnt);
2242		/*
2243		 * Now all dquots should be invalidated and all writes done, so we
2244		 * should be the only users of the info. No locks needed.
2245		 */
2246		if (info_dirty(&dqopt->info[cnt]))
2247			sb->dq_op->write_info(sb, cnt);
2248		if (dqopt->ops[cnt]->free_file_info)
2249			dqopt->ops[cnt]->free_file_info(sb, cnt);
2250		put_quota_format(dqopt->info[cnt].dqi_format);
2251		dqopt->info[cnt].dqi_flags = 0;
2252		dqopt->info[cnt].dqi_igrace = 0;
2253		dqopt->info[cnt].dqi_bgrace = 0;
2254		dqopt->ops[cnt] = NULL;
2255	}
2256
2257	/* Skip syncing and setting flags if quota files are hidden */
2258	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2259		goto put_inodes;
2260
2261	/* Sync the superblock so that buffers with quota data are written to
2262	 * disk (and so userspace sees correct data afterwards). */
2263	if (sb->s_op->sync_fs)
2264		sb->s_op->sync_fs(sb, 1);
2265	sync_blockdev(sb->s_bdev);
2266	/* Now the quota files are just ordinary files and we can set the
2267	 * inode flags back. Moreover we discard the pagecache so that
2268	 * userspace sees the writes we did bypassing the pagecache. We
2269	 * must also discard the blockdev buffers so that we see the
2270	 * changes done by userspace on the next quotaon(). */
2271	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2272		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
2273			inode_lock(dqopt->files[cnt]);
2274			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
2275			inode_unlock(dqopt->files[cnt]);
2276		}
2277	if (sb->s_bdev)
2278		invalidate_bdev(sb->s_bdev);
2279put_inodes:
2280	/* We are done when suspending quotas */
2281	if (flags & DQUOT_SUSPENDED)
2282		return 0;
2283
2284	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2285		if (!sb_has_quota_loaded(sb, cnt))
2286			vfs_cleanup_quota_inode(sb, cnt);
2287	return 0;
2288}
2289EXPORT_SYMBOL(dquot_disable);
2290
2291int dquot_quota_off(struct super_block *sb, int type)
2292{
2293	return dquot_disable(sb, type,
2294			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2295}
2296EXPORT_SYMBOL(dquot_quota_off);
2297
2298/*
2299 *	Turn quotas on for a device
2300 */
2301
2302static int vfs_setup_quota_inode(struct inode *inode, int type)
2303{
2304	struct super_block *sb = inode->i_sb;
2305	struct quota_info *dqopt = sb_dqopt(sb);
2306
2307	if (!S_ISREG(inode->i_mode))
2308		return -EACCES;
2309	if (IS_RDONLY(inode))
2310		return -EROFS;
2311	if (sb_has_quota_loaded(sb, type))
2312		return -EBUSY;
2313
2314	dqopt->files[type] = igrab(inode);
2315	if (!dqopt->files[type])
2316		return -EIO;
2317	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2318		/* We don't want quota and atime on quota files (deadlocks
2319		 * possible). Also nobody should write to the file - we use
2320		 * special IO operations which ignore the immutable bit. */
2321		inode_lock(inode);
2322		inode->i_flags |= S_NOQUOTA;
2323		inode_unlock(inode);
2324		/*
2325		 * When S_NOQUOTA is set, remove dquot references as no more
2326		 * references can be added
2327		 */
2328		__dquot_drop(inode);
2329	}
2330	return 0;
2331}
2332
2333int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
2334	unsigned int flags)
2335{
2336	struct quota_format_type *fmt = find_quota_format(format_id);
2337	struct quota_info *dqopt = sb_dqopt(sb);
2338	int error;
2339
2340	/* Just unsuspend quotas? */
2341	BUG_ON(flags & DQUOT_SUSPENDED);
2342	/* s_umount should be held in exclusive mode */
2343	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2344		up_read(&sb->s_umount);
2345
2346	if (!fmt)
2347		return -ESRCH;
2348	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
2349	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
2350		error = -EINVAL;
2351		goto out_fmt;
2352	}
2353	/* Filesystems outside of init_user_ns are not yet supported */
2354	if (sb->s_user_ns != &init_user_ns) {
2355		error = -EINVAL;
2356		goto out_fmt;
2357	}
2358	/* Usage always has to be set... */
2359	if (!(flags & DQUOT_USAGE_ENABLED)) {
2360		error = -EINVAL;
2361		goto out_fmt;
2362	}
2363	if (sb_has_quota_loaded(sb, type)) {
2364		error = -EBUSY;
2365		goto out_fmt;
2366	}
2367
2368	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2369		/* As we bypass the pagecache we must now flush all the
2370		 * dirty data and invalidate caches so that the kernel sees
2371		 * changes from userspace. It is not enough to just flush
2372		 * the quota file since if blocksize < pagesize, invalidation
2373		 * of the cache could fail because of other unrelated dirty
2374		 * data. */
2375		sync_filesystem(sb);
2376		invalidate_bdev(sb->s_bdev);
2377	}
2378
2379	error = -EINVAL;
2380	if (!fmt->qf_ops->check_quota_file(sb, type))
2381		goto out_fmt;
2382
2383	dqopt->ops[type] = fmt->qf_ops;
2384	dqopt->info[type].dqi_format = fmt;
2385	dqopt->info[type].dqi_fmt_id = format_id;
2386	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2387	error = dqopt->ops[type]->read_file_info(sb, type);
2388	if (error < 0)
2389		goto out_fmt;
2390	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
2391		spin_lock(&dq_data_lock);
2392		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2393		spin_unlock(&dq_data_lock);
2394	}
2395	spin_lock(&dq_state_lock);
2396	dqopt->flags |= dquot_state_flag(flags, type);
2397	spin_unlock(&dq_state_lock);
2398
2399	error = add_dquot_ref(sb, type);
2400	if (error)
2401		dquot_disable(sb, type, flags);
2402
2403	return error;
2404out_fmt:
2405	put_quota_format(fmt);
2406
2407	return error;
2408}
2409EXPORT_SYMBOL(dquot_load_quota_sb);
2410
2411/*
2412 * More powerful function for turning on quotas on given quota inode allowing
2413 * setting of individual quota flags
2414 */
2415int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
2416	unsigned int flags)
2417{
2418	int err;
2419
2420	err = vfs_setup_quota_inode(inode, type);
2421	if (err < 0)
2422		return err;
2423	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
2424	if (err < 0)
2425		vfs_cleanup_quota_inode(inode->i_sb, type);
2426	return err;
2427}
2428EXPORT_SYMBOL(dquot_load_quota_inode);
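/*
 * Sketch (loosely modelled on what journaling filesystems do; the qf_inum
 * lookup helper is hypothetical): enabling quota on a hidden system inode
 * during mount.  dquot_load_quota_inode() grabs its own inode reference,
 * so the caller drops the lookup reference afterwards.
 */
static int myfs_enable_quota(struct super_block *sb, int type,
			     unsigned long qf_inum)
{
	struct inode *qf_inode = myfs_iget(sb, qf_inum);	/* hypothetical */
	int err;

	if (IS_ERR(qf_inode))
		return PTR_ERR(qf_inode);
	err = dquot_load_quota_inode(qf_inode, type, QFMT_VFS_V1,
				     DQUOT_USAGE_ENABLED |
				     DQUOT_LIMITS_ENABLED);
	iput(qf_inode);
	return err;
}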
2429
2430/* Reenable quotas on remount RW */
2431int dquot_resume(struct super_block *sb, int type)
2432{
2433	struct quota_info *dqopt = sb_dqopt(sb);
2434	int ret = 0, cnt;
2435	unsigned int flags;
2436
2437	/* s_umount should be held in exclusive mode */
2438	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2439		up_read(&sb->s_umount);
2440
2441	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2442		if (type != -1 && cnt != type)
2443			continue;
2444		if (!sb_has_quota_suspended(sb, cnt))
2445			continue;
2446
2447		spin_lock(&dq_state_lock);
2448		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2449							DQUOT_LIMITS_ENABLED,
2450							cnt);
2451		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2452		spin_unlock(&dq_state_lock);
2453
2454		flags = dquot_generic_flag(flags, cnt);
2455		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
2456					  flags);
2457		if (ret < 0)
2458			vfs_cleanup_quota_inode(sb, type);
2459	}
2460
2461	return ret;
2462}
2463EXPORT_SYMBOL(dquot_resume);
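/*
 * Sketch: remount read-write is the usual caller (the rdonly check mirrors
 * what simple filesystems do in ->remount_fs; the myfs name is
 * hypothetical and other remount handling is omitted).
 */
static int myfs_remount(struct super_block *sb, int *flags, char *data)
{
	if (sb_rdonly(sb) && !(*flags & SB_RDONLY))
		return dquot_resume(sb, -1);	/* unsuspend all quota types */
	return 0;
}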
2464
2465int dquot_quota_on(struct super_block *sb, int type, int format_id,
2466		   const struct path *path)
2467{
2468	int error = security_quota_on(path->dentry);
2469	if (error)
2470		return error;
2471	/* Quota file not on the same filesystem? */
2472	if (path->dentry->d_sb != sb)
2473		error = -EXDEV;
2474	else
2475		error = dquot_load_quota_inode(d_inode(path->dentry), type,
2476					     format_id, DQUOT_USAGE_ENABLED |
2477					     DQUOT_LIMITS_ENABLED);
2478	return error;
2479}
2480EXPORT_SYMBOL(dquot_quota_on);
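/*
 * Sketch: a filesystem with ordinary, user-visible quota files typically
 * plugs the generic helpers straight into its quotactl_ops and points
 * sb->s_qcop at it from fill_super (the myfs name is hypothetical).
 */
static const struct quotactl_ops myfs_quotactl_ops = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
};
/* in myfs_fill_super():
 *	sb->s_qcop = &myfs_quotactl_ops;
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */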
2481
2482/*
2483 * This function is used when a filesystem needs to initialize quotas
2484 * at mount time.
2485 */
2486int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2487		int format_id, int type)
2488{
2489	struct dentry *dentry;
2490	int error;
2491
2492	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
2493	if (IS_ERR(dentry))
2494		return PTR_ERR(dentry);
2495
2496	error = security_quota_on(dentry);
2497	if (!error)
2498		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
2499				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2500
2501	dput(dentry);
2502	return error;
2503}
2504EXPORT_SYMBOL(dquot_quota_on_mount);
2505
2506static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
2507{
2508	int ret;
2509	int type;
2510	struct quota_info *dqopt = sb_dqopt(sb);
2511
2512	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2513		return -ENOSYS;
2514	/* Accounting cannot be turned on while fs is mounted */
2515	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
2516	if (!flags)
2517		return -EINVAL;
2518	for (type = 0; type < MAXQUOTAS; type++) {
2519		if (!(flags & qtype_enforce_flag(type)))
2520			continue;
2521		/* Can't enforce without accounting */
2522		if (!sb_has_quota_usage_enabled(sb, type)) {
2523			ret = -EINVAL;
2524			goto out_err;
2525		}
2526		if (sb_has_quota_limits_enabled(sb, type)) {
2527			ret = -EBUSY;
2528			goto out_err;
2529		}
2530		spin_lock(&dq_state_lock);
2531		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
2532		spin_unlock(&dq_state_lock);
2533	}
2534	return 0;
2535out_err:
2536	/* Back out enforcement enablement we already did */
2537	for (type--; type >= 0; type--)  {
2538		if (flags & qtype_enforce_flag(type))
2539			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2540	}
2541	/* Error code translation for better compatibility with XFS */
2542	if (ret == -EBUSY)
2543		ret = -EEXIST;
2544	return ret;
2545}
2546
2547static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
2548{
2549	int ret;
2550	int type;
2551	struct quota_info *dqopt = sb_dqopt(sb);
2552
2553	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
2554		return -ENOSYS;
2555	/*
2556	 * We don't support turning off accounting via quotactl. In principle
2557	 * the quota infrastructure can do this, but filesystems don't expect
2558	 * userspace to be able to do it.
2559	 */
2560	if (flags &
2561		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
2562		return -EOPNOTSUPP;
2563
2564	/* Filter out limits not enabled */
2565	for (type = 0; type < MAXQUOTAS; type++)
2566		if (!sb_has_quota_limits_enabled(sb, type))
2567			flags &= ~qtype_enforce_flag(type);
2568	/* Nothing left? */
2569	if (!flags)
2570		return -EEXIST;
2571	for (type = 0; type < MAXQUOTAS; type++) {
2572		if (flags & qtype_enforce_flag(type)) {
2573			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
2574			if (ret < 0)
2575				goto out_err;
2576		}
2577	}
2578	return 0;
2579out_err:
2580	/* Back out enforcement disabling we already did */
2581	for (type--; type >= 0; type--)  {
2582		if (flags & qtype_enforce_flag(type)) {
2583			spin_lock(&dq_state_lock);
2584			dqopt->flags |=
2585				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
2586			spin_unlock(&dq_state_lock);
2587		}
2588	}
2589	return ret;
2590}
2591
2592/* Generic routine for getting common part of quota structure */
2593static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2594{
2595	struct mem_dqblk *dm = &dquot->dq_dqb;
2596
2597	memset(di, 0, sizeof(*di));
2598	spin_lock(&dquot->dq_dqb_lock);
2599	di->d_spc_hardlimit = dm->dqb_bhardlimit;
2600	di->d_spc_softlimit = dm->dqb_bsoftlimit;
2601	di->d_ino_hardlimit = dm->dqb_ihardlimit;
2602	di->d_ino_softlimit = dm->dqb_isoftlimit;
2603	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2604	di->d_ino_count = dm->dqb_curinodes;
2605	di->d_spc_timer = dm->dqb_btime;
2606	di->d_ino_timer = dm->dqb_itime;
2607	spin_unlock(&dquot->dq_dqb_lock);
2608}
2609
2610int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2611		    struct qc_dqblk *di)
2612{
2613	struct dquot *dquot;
2614
2615	dquot = dqget(sb, qid);
2616	if (IS_ERR(dquot))
2617		return PTR_ERR(dquot);
2618	do_get_dqblk(dquot, di);
2619	dqput(dquot);
2620
2621	return 0;
2622}
2623EXPORT_SYMBOL(dquot_get_dqblk);
2624
2625int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
2626			 struct qc_dqblk *di)
2627{
2628	struct dquot *dquot;
2629	int err;
2630
2631	if (!sb->dq_op->get_next_id)
2632		return -ENOSYS;
2633	err = sb->dq_op->get_next_id(sb, qid);
2634	if (err < 0)
2635		return err;
2636	dquot = dqget(sb, *qid);
2637	if (IS_ERR(dquot))
2638		return PTR_ERR(dquot);
2639	do_get_dqblk(dquot, di);
2640	dqput(dquot);
2641
2642	return 0;
2643}
2644EXPORT_SYMBOL(dquot_get_next_dqblk);
2645
2646#define VFS_QC_MASK \
2647	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
2648	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
2649	 QC_SPC_TIMER | QC_INO_TIMER)
2650
2651/* Generic routine for setting common part of quota structure */
2652static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2653{
2654	struct mem_dqblk *dm = &dquot->dq_dqb;
2655	int check_blim = 0, check_ilim = 0;
2656	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
2657
2658	if (di->d_fieldmask & ~VFS_QC_MASK)
2659		return -EINVAL;
2660
2661	if (((di->d_fieldmask & QC_SPC_SOFT) &&
2662	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
2663	    ((di->d_fieldmask & QC_SPC_HARD) &&
2664	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
2665	    ((di->d_fieldmask & QC_INO_SOFT) &&
2666	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
2667	    ((di->d_fieldmask & QC_INO_HARD) &&
2668	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
2669		return -ERANGE;
2670
2671	spin_lock(&dquot->dq_dqb_lock);
2672	if (di->d_fieldmask & QC_SPACE) {
2673		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
2674		check_blim = 1;
2675		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2676	}
2677
2678	if (di->d_fieldmask & QC_SPC_SOFT)
2679		dm->dqb_bsoftlimit = di->d_spc_softlimit;
2680	if (di->d_fieldmask & QC_SPC_HARD)
2681		dm->dqb_bhardlimit = di->d_spc_hardlimit;
2682	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
2683		check_blim = 1;
2684		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2685	}
2686
2687	if (di->d_fieldmask & QC_INO_COUNT) {
2688		dm->dqb_curinodes = di->d_ino_count;
2689		check_ilim = 1;
2690		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2691	}
2692
2693	if (di->d_fieldmask & QC_INO_SOFT)
2694		dm->dqb_isoftlimit = di->d_ino_softlimit;
2695	if (di->d_fieldmask & QC_INO_HARD)
2696		dm->dqb_ihardlimit = di->d_ino_hardlimit;
2697	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
2698		check_ilim = 1;
2699		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2700	}
2701
2702	if (di->d_fieldmask & QC_SPC_TIMER) {
2703		dm->dqb_btime = di->d_spc_timer;
2704		check_blim = 1;
2705		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2706	}
2707
2708	if (di->d_fieldmask & QC_INO_TIMER) {
2709		dm->dqb_itime = di->d_ino_timer;
2710		check_ilim = 1;
2711		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2712	}
2713
2714	if (check_blim) {
2715		if (!dm->dqb_bsoftlimit ||
2716		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
2717			dm->dqb_btime = 0;
2718			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2719		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
2720			/* Set grace only if user hasn't provided his own... */
2721			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
2722	}
2723	if (check_ilim) {
2724		if (!dm->dqb_isoftlimit ||
2725		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
2726			dm->dqb_itime = 0;
2727			clear_bit(DQ_INODES_B, &dquot->dq_flags);
2728		} else if (!(di->d_fieldmask & QC_INO_TIMER))
2729			/* Set grace only if user hasn't provided his own... */
2730			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
2731	}
2732	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2733	    dm->dqb_isoftlimit)
2734		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2735	else
2736		set_bit(DQ_FAKE_B, &dquot->dq_flags);
2737	spin_unlock(&dquot->dq_dqb_lock);
2738	mark_dquot_dirty(dquot);
2739
2740	return 0;
2741}
2742
2743int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
2744		  struct qc_dqblk *di)
2745{
2746	struct dquot *dquot;
2747	int rc;
2748
2749	dquot = dqget(sb, qid);
2750	if (IS_ERR(dquot)) {
2751		rc = PTR_ERR(dquot);
2752		goto out;
2753	}
2754	rc = do_set_dqblk(dquot, di);
2755	dqput(dquot);
2756out:
2757	return rc;
2758}
2759EXPORT_SYMBOL(dquot_set_dqblk);
2760
2761/* Generic routine for getting common part of quota file information */
2762int dquot_get_state(struct super_block *sb, struct qc_state *state)
2763{
2764	struct mem_dqinfo *mi;
2765	struct qc_type_state *tstate;
2766	struct quota_info *dqopt = sb_dqopt(sb);
2767	int type;
2768
2769	memset(state, 0, sizeof(*state));
2770	for (type = 0; type < MAXQUOTAS; type++) {
2771		if (!sb_has_quota_active(sb, type))
2772			continue;
2773		tstate = state->s_state + type;
2774		mi = sb_dqopt(sb)->info + type;
2775		tstate->flags = QCI_ACCT_ENABLED;
2776		spin_lock(&dq_data_lock);
2777		if (mi->dqi_flags & DQF_SYS_FILE)
2778			tstate->flags |= QCI_SYSFILE;
2779		if (mi->dqi_flags & DQF_ROOT_SQUASH)
2780			tstate->flags |= QCI_ROOT_SQUASH;
2781		if (sb_has_quota_limits_enabled(sb, type))
2782			tstate->flags |= QCI_LIMITS_ENFORCED;
2783		tstate->spc_timelimit = mi->dqi_bgrace;
2784		tstate->ino_timelimit = mi->dqi_igrace;
2785		if (dqopt->files[type]) {
2786			tstate->ino = dqopt->files[type]->i_ino;
2787			tstate->blocks = dqopt->files[type]->i_blocks;
2788		}
2789		tstate->nextents = 1;	/* We don't know... */
2790		spin_unlock(&dq_data_lock);
2791	}
2792	return 0;
2793}
2794EXPORT_SYMBOL(dquot_get_state);
2795
2796/* Generic routine for setting common part of quota file information */
2797int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
2798{
2799	struct mem_dqinfo *mi;
2800	int err = 0;
2801
2802	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
2803	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
2804		return -EINVAL;
2805	if (!sb_has_quota_active(sb, type))
2806		return -ESRCH;
2807	mi = sb_dqopt(sb)->info + type;
2808	if (ii->i_fieldmask & QC_FLAGS) {
2809		if ((ii->i_flags & QCI_ROOT_SQUASH &&
2810		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
2811			return -EINVAL;
2812	}
2813	spin_lock(&dq_data_lock);
2814	if (ii->i_fieldmask & QC_SPC_TIMER)
2815		mi->dqi_bgrace = ii->i_spc_timelimit;
2816	if (ii->i_fieldmask & QC_INO_TIMER)
2817		mi->dqi_igrace = ii->i_ino_timelimit;
2818	if (ii->i_fieldmask & QC_FLAGS) {
2819		if (ii->i_flags & QCI_ROOT_SQUASH)
2820			mi->dqi_flags |= DQF_ROOT_SQUASH;
2821		else
2822			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
2823	}
2824	spin_unlock(&dq_data_lock);
2825	mark_info_dirty(sb, type);
2826	/* Force write to disk */
2827	sb->dq_op->write_info(sb, type);
2828	return err;
2829}
2830EXPORT_SYMBOL(dquot_set_dqinfo);
2831
2832const struct quotactl_ops dquot_quotactl_sysfile_ops = {
2833	.quota_enable	= dquot_quota_enable,
2834	.quota_disable	= dquot_quota_disable,
2835	.quota_sync	= dquot_quota_sync,
2836	.get_state	= dquot_get_state,
2837	.set_info	= dquot_set_dqinfo,
2838	.get_dqblk	= dquot_get_dqblk,
2839	.get_nextdqblk	= dquot_get_next_dqblk,
2840	.set_dqblk	= dquot_set_dqblk
2841};
2842EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
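/*
 * Usage sketch: filesystems whose quota files are hidden system inodes set
 * DQUOT_QUOTA_SYS_FILE and can plug this table in directly, e.g. from
 * fill_super:
 *
 *	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 */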
2843
2844static int do_proc_dqstats(struct ctl_table *table, int write,
2845		     void *buffer, size_t *lenp, loff_t *ppos)
2846{
2847	unsigned int type = (unsigned long *)table->data - dqstats.stat;
2848	s64 value = percpu_counter_sum(&dqstats.counter[type]);
2849
2850	/* Filter negative values for non-monotonic counters */
2851	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
2852			  type == DQST_FREE_DQUOTS))
2853		value = 0;
2854
2855	/* Update global table */
2856	dqstats.stat[type] = value;
2857	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2858}
2859
2860static struct ctl_table fs_dqstats_table[] = {
2861	{
2862		.procname	= "lookups",
2863		.data		= &dqstats.stat[DQST_LOOKUPS],
2864		.maxlen		= sizeof(unsigned long),
2865		.mode		= 0444,
2866		.proc_handler	= do_proc_dqstats,
2867	},
2868	{
2869		.procname	= "drops",
2870		.data		= &dqstats.stat[DQST_DROPS],
2871		.maxlen		= sizeof(unsigned long),
2872		.mode		= 0444,
2873		.proc_handler	= do_proc_dqstats,
2874	},
2875	{
2876		.procname	= "reads",
2877		.data		= &dqstats.stat[DQST_READS],
2878		.maxlen		= sizeof(unsigned long),
2879		.mode		= 0444,
2880		.proc_handler	= do_proc_dqstats,
2881	},
2882	{
2883		.procname	= "writes",
2884		.data		= &dqstats.stat[DQST_WRITES],
2885		.maxlen		= sizeof(unsigned long),
2886		.mode		= 0444,
2887		.proc_handler	= do_proc_dqstats,
2888	},
2889	{
2890		.procname	= "cache_hits",
2891		.data		= &dqstats.stat[DQST_CACHE_HITS],
2892		.maxlen		= sizeof(unsigned long),
2893		.mode		= 0444,
2894		.proc_handler	= do_proc_dqstats,
2895	},
2896	{
2897		.procname	= "allocated_dquots",
2898		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
2899		.maxlen		= sizeof(unsigned long),
2900		.mode		= 0444,
2901		.proc_handler	= do_proc_dqstats,
2902	},
2903	{
2904		.procname	= "free_dquots",
2905		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
2906		.maxlen		= sizeof(unsigned long),
2907		.mode		= 0444,
2908		.proc_handler	= do_proc_dqstats,
2909	},
2910	{
2911		.procname	= "syncs",
2912		.data		= &dqstats.stat[DQST_SYNCS],
2913		.maxlen		= sizeof(unsigned long),
2914		.mode		= 0444,
2915		.proc_handler	= do_proc_dqstats,
2916	},
2917#ifdef CONFIG_PRINT_QUOTA_WARNING
2918	{
2919		.procname	= "warnings",
2920		.data		= &flag_print_warnings,
2921		.maxlen		= sizeof(int),
2922		.mode		= 0644,
2923		.proc_handler	= proc_dointvec,
2924	},
2925#endif
2926	{ },
2927};
2928
2929static struct ctl_table fs_table[] = {
2930	{
2931		.procname	= "quota",
2932		.mode		= 0555,
2933		.child		= fs_dqstats_table,
2934	},
2935	{ },
2936};
2937
2938static struct ctl_table sys_table[] = {
2939	{
2940		.procname	= "fs",
2941		.mode		= 0555,
2942		.child		= fs_table,
2943	},
2944	{ },
2945};
2946
2947static int __init dquot_init(void)
2948{
2949	int i, ret;
2950	unsigned long nr_hash, order;
2951
2952	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2953
2954	register_sysctl_table(sys_table);
2955
2956	dquot_cachep = kmem_cache_create("dquot",
2957			sizeof(struct dquot), sizeof(unsigned long) * 4,
2958			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2959				SLAB_MEM_SPREAD|SLAB_PANIC),
2960			NULL);
2961
2962	order = 0;
2963	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
2964	if (!dquot_hash)
2965		panic("Cannot create dquot hash table");
2966
2967	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2968		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
2969		if (ret)
2970			panic("Cannot create dquot stat counters");
2971	}
2972
2973	/* Find power-of-two hlist_heads which can fit into allocation */
2974	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2975	dq_hash_bits = ilog2(nr_hash);
2976
2977	nr_hash = 1UL << dq_hash_bits;
2978	dq_hash_mask = nr_hash - 1;
2979	for (i = 0; i < nr_hash; i++)
2980		INIT_HLIST_HEAD(dquot_hash + i);
2981
2982	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
2983		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
2984
2985	if (register_shrinker(&dqcache_shrinker))
2986		panic("Cannot register dquot shrinker");
2987
2988	return 0;
2989}
2990fs_initcall(dquot_init);
v3.1
 
   1/*
   2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
   3 * is implemented using the BSD system call interface as the means of
   4 * communication with the user level. This file contains the generic routines
   5 * called by the different filesystems on allocation of an inode or block.
   6 * These routines take care of the administration needed to have a consistent
   7 * diskquota tracking system. The ideas of both user and group quotas are based
   8 * on the Melbourne quota system as used on BSD derived systems. The internal
   9 * implementation is based on one of the several variants of the LINUX
  10 * inode-subsystem with added complexity of the diskquota system.
  11 * 
  12 * Author:	Marco van Wieringen <mvw@planets.elm.net>
  13 *
  14 * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
  15 *
  16 *		Revised list management to avoid races
  17 *		-- Bill Hawes, <whawes@star.net>, 9/98
  18 *
  19 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
  20 *		As the consequence the locking was moved from dquot_decr_...(),
  21 *		dquot_incr_...() to calling functions.
  22 *		invalidate_dquots() now writes modified dquots.
  23 *		Serialized quota_off() and quota_on() for mount point.
  24 *		Fixed a few bugs in grow_dquots().
  25 *		Fixed deadlock in write_dquot() - we no longer account quotas on
  26 *		quota files
  27 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
  28 *		add_dquot_ref() restarts after blocking
  29 *		Added check for bogus uid and fixed check for group in quotactl.
  30 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
  31 *
  32 *		Used struct list_head instead of own list struct
  33 *		Invalidation of referenced dquots is no longer possible
  34 *		Improved free_dquots list management
  35 *		Quota and i_blocks are now updated in one place to avoid races
  36 *		Warnings are now delayed so we won't block in critical section
  37 *		Write updated not to require dquot lock
  38 *		Jan Kara, <jack@suse.cz>, 9/2000
  39 *
  40 *		Added dynamic quota structure allocation
  41 *		Jan Kara <jack@suse.cz> 12/2000
  42 *
  43 *		Rewritten quota interface. Implemented new quota format and
  44 *		formats registering.
  45 *		Jan Kara, <jack@suse.cz>, 2001,2002
  46 *
  47 *		New SMP locking.
  48 *		Jan Kara, <jack@suse.cz>, 10/2002
  49 *
  50 *		Added journalled quota support, fix lock inversion problems
  51 *		Jan Kara, <jack@suse.cz>, 2003,2004
  52 *
  53 * (C) Copyright 1994 - 1997 Marco van Wieringen 
  54 */
  55
  56#include <linux/errno.h>
  57#include <linux/kernel.h>
  58#include <linux/fs.h>
  59#include <linux/mount.h>
  60#include <linux/mm.h>
  61#include <linux/time.h>
  62#include <linux/types.h>
  63#include <linux/string.h>
  64#include <linux/fcntl.h>
  65#include <linux/stat.h>
  66#include <linux/tty.h>
  67#include <linux/file.h>
  68#include <linux/slab.h>
  69#include <linux/sysctl.h>
  70#include <linux/init.h>
  71#include <linux/module.h>
  72#include <linux/proc_fs.h>
  73#include <linux/security.h>
 
 
  74#include <linux/kmod.h>
  75#include <linux/namei.h>
  76#include <linux/buffer_head.h>
  77#include <linux/capability.h>
  78#include <linux/quotaops.h>
 
  79#include "../internal.h" /* ugh */
  80
  81#include <asm/uaccess.h>
  82
  83/*
  84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
  85 * and quota formats.
  86 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
  87 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
  88 * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
  89 * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
  90 * modifications of quota state (on quotaon and quotaoff) and readers who care
  91 * about latest values take it as well.
 
 
  92 *
  93 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
 
  94 *   dq_list_lock > dq_state_lock
  95 *
  96 * Note that some things (eg. sb pointer, type, id) doesn't change during
  97 * the life of the dquot structure and so needn't to be protected by a lock
  98 *
  99 * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
 100 * operation is just reading pointers from inode (or not using them at all) the
 101 * read lock is enough. If pointers are altered function must hold write lock.
 
 
 
 102 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 103 * inode is a quota file). Functions adding pointers from inode to dquots have
 104 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
 105 * have to do all pointer modifications before dropping dqptr_sem. This makes
 106 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 107 * then drops all pointers to dquots from an inode.
 108 *
 109 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
 110 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 111 * Currently dquot is locked only when it is being read to memory (or space for
 112 * it is being allocated) on the first dqget() and when it is being released on
 113 * the last dqput(). The allocation and release oparations are serialized by
 114 * the dq_lock and by checking the use count in dquot_release().  Write
 115 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
 116 * spinlock to internal buffers before writing.
 117 *
 118 * Lock ordering (including related VFS locks) is the following:
 119 *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
 120 *   dqio_mutex
 121 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
 122 * dqptr_sem. But filesystem has to count with the fact that functions such as
 123 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
 124 * from inside a transaction to keep filesystem consistency after a crash. Also
 125 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
 126 * called with dqptr_sem held.
 127 * i_mutex on quota files is special (it's below dqio_mutex)
 128 */
 129
 130static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
 131static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 132__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 133EXPORT_SYMBOL(dq_data_lock);
 
 
 
 134
 135void __quota_error(struct super_block *sb, const char *func,
 136		   const char *fmt, ...)
 137{
 138	if (printk_ratelimit()) {
 139		va_list args;
 140		struct va_format vaf;
 141
 142		va_start(args, fmt);
 143
 144		vaf.fmt = fmt;
 145		vaf.va = &args;
 146
 147		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
 148		       sb->s_id, func, &vaf);
 149
 150		va_end(args);
 151	}
 152}
 153EXPORT_SYMBOL(__quota_error);
 154
 155#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
 156static char *quotatypes[] = INITQFNAMES;
 157#endif
 158static struct quota_format_type *quota_formats;	/* List of registered formats */
 159static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 160
 161/* SLAB cache for dquot structures */
 162static struct kmem_cache *dquot_cachep;
 163
 164int register_quota_format(struct quota_format_type *fmt)
 165{
 166	spin_lock(&dq_list_lock);
 167	fmt->qf_next = quota_formats;
 168	quota_formats = fmt;
 169	spin_unlock(&dq_list_lock);
 170	return 0;
 171}
 172EXPORT_SYMBOL(register_quota_format);
 173
 174void unregister_quota_format(struct quota_format_type *fmt)
 175{
 176	struct quota_format_type **actqf;
 177
 178	spin_lock(&dq_list_lock);
 179	for (actqf = &quota_formats; *actqf && *actqf != fmt;
 180	     actqf = &(*actqf)->qf_next)
 181		;
 182	if (*actqf)
 183		*actqf = (*actqf)->qf_next;
 184	spin_unlock(&dq_list_lock);
 185}
 186EXPORT_SYMBOL(unregister_quota_format);
 187
 188static struct quota_format_type *find_quota_format(int id)
 189{
 190	struct quota_format_type *actqf;
 191
 192	spin_lock(&dq_list_lock);
 193	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
 194	     actqf = actqf->qf_next)
 195		;
 196	if (!actqf || !try_module_get(actqf->qf_owner)) {
 197		int qm;
 198
 199		spin_unlock(&dq_list_lock);
 200		
 201		for (qm = 0; module_names[qm].qm_fmt_id &&
 202			     module_names[qm].qm_fmt_id != id; qm++)
 203			;
 204		if (!module_names[qm].qm_fmt_id ||
 205		    request_module(module_names[qm].qm_mod_name))
 206			return NULL;
 207
 208		spin_lock(&dq_list_lock);
 209		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
 210		     actqf = actqf->qf_next)
 211			;
 212		if (actqf && !try_module_get(actqf->qf_owner))
 213			actqf = NULL;
 214	}
 215	spin_unlock(&dq_list_lock);
 216	return actqf;
 217}
 218
 219static void put_quota_format(struct quota_format_type *fmt)
 220{
 221	module_put(fmt->qf_owner);
 222}
 223
 224/*
 225 * Dquot List Management:
 226 * The quota code uses three lists for dquot management: the inuse_list,
 227 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 228 * on all three lists, depending on its current state.
 229 *
 230 * All dquots are placed to the end of inuse_list when first created, and this
 231 * list is used for invalidate operation, which must look at every dquot.
 232 *
 233 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 234 * and this list is searched whenever we need an available dquot.  Dquots are
 235 * removed from the list as soon as they are used again, and
 236 * dqstats.free_dquots gives the number of dquots on the list. When
 237 * dquot is invalidated it's completely released from memory.
 238 *
 
 
 
 
 
 239 * Dquots with a specific identity (device, type and id) are placed on
 240 * one of the dquot_hash[] hash chains. The provides an efficient search
 241 * mechanism to locate a specific dquot.
 242 */
 243
 244static LIST_HEAD(inuse_list);
 245static LIST_HEAD(free_dquots);
 246static unsigned int dq_hash_bits, dq_hash_mask;
 247static struct hlist_head *dquot_hash;
 248
 249struct dqstats dqstats;
 250EXPORT_SYMBOL(dqstats);
 251
 252static qsize_t inode_get_rsv_space(struct inode *inode);
 253static void __dquot_initialize(struct inode *inode, int type);
 
 254
 255static inline unsigned int
 256hashfn(const struct super_block *sb, unsigned int id, int type)
 257{
 
 
 258	unsigned long tmp;
 259
 260	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
 261	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
 262}
 263
 264/*
 265 * Following list functions expect dq_list_lock to be held
 266 */
 267static inline void insert_dquot_hash(struct dquot *dquot)
 268{
 269	struct hlist_head *head;
 270	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
 271	hlist_add_head(&dquot->dq_hash, head);
 272}
 273
 274static inline void remove_dquot_hash(struct dquot *dquot)
 275{
 276	hlist_del_init(&dquot->dq_hash);
 277}
 278
 279static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
 280				unsigned int id, int type)
 281{
 282	struct hlist_node *node;
 283	struct dquot *dquot;
 284
 285	hlist_for_each (node, dquot_hash+hashent) {
 286		dquot = hlist_entry(node, struct dquot, dq_hash);
 287		if (dquot->dq_sb == sb && dquot->dq_id == id &&
 288		    dquot->dq_type == type)
 289			return dquot;
 290	}
 291	return NULL;
 292}
 293
 294/* Add a dquot to the tail of the free list */
 295static inline void put_dquot_last(struct dquot *dquot)
 296{
 297	list_add_tail(&dquot->dq_free, &free_dquots);
 298	dqstats_inc(DQST_FREE_DQUOTS);
 299}
 300
 301static inline void remove_free_dquot(struct dquot *dquot)
 302{
 303	if (list_empty(&dquot->dq_free))
 304		return;
 305	list_del_init(&dquot->dq_free);
 306	dqstats_dec(DQST_FREE_DQUOTS);
 307}
 308
 309static inline void put_inuse(struct dquot *dquot)
 310{
 311	/* We add to the back of inuse list so we don't have to restart
 312	 * when traversing this list and we block */
 313	list_add_tail(&dquot->dq_inuse, &inuse_list);
 314	dqstats_inc(DQST_ALLOC_DQUOTS);
 315}
 316
 317static inline void remove_inuse(struct dquot *dquot)
 318{
 319	dqstats_dec(DQST_ALLOC_DQUOTS);
 320	list_del(&dquot->dq_inuse);
 321}
 322/*
 323 * End of list functions needing dq_list_lock
 324 */
 325
 326static void wait_on_dquot(struct dquot *dquot)
 327{
 328	mutex_lock(&dquot->dq_lock);
 329	mutex_unlock(&dquot->dq_lock);
 330}
 331
 332static inline int dquot_dirty(struct dquot *dquot)
 333{
 334	return test_bit(DQ_MOD_B, &dquot->dq_flags);
 335}
 336
 337static inline int mark_dquot_dirty(struct dquot *dquot)
 338{
 339	return dquot->dq_sb->dq_op->mark_dirty(dquot);
 340}
 341
 342/* Mark dquot dirty in atomic manner, and return it's old dirty flag state */
 343int dquot_mark_dquot_dirty(struct dquot *dquot)
 344{
 345	int ret = 1;
 346
 
 
 
 
 
 
 347	/* If quota is dirty already, we don't have to acquire dq_list_lock */
 348	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
 349		return 1;
 350
 351	spin_lock(&dq_list_lock);
 352	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
 353		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
 354				info[dquot->dq_type].dqi_dirty_list);
 355		ret = 0;
 356	}
 357	spin_unlock(&dq_list_lock);
 358	return ret;
 359}
 360EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 361
 362/* Dirtify all the dquots - this can block when journalling */
 363static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
 364{
 365	int ret, err, cnt;
 366
 367	ret = err = 0;
 368	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 369		if (dquot[cnt])
 370			/* Even in case of error we have to continue */
 371			ret = mark_dquot_dirty(dquot[cnt]);
 372		if (!err)
 373			err = ret;
 374	}
 375	return err;
 376}
 377
 378static inline void dqput_all(struct dquot **dquot)
 379{
 380	unsigned int cnt;
 381
 382	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 383		dqput(dquot[cnt]);
 384}
 385
 386/* This function needs dq_list_lock */
 387static inline int clear_dquot_dirty(struct dquot *dquot)
 388{
 389	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
 
 
 
 
 
 390		return 0;
 
 391	list_del_init(&dquot->dq_dirty);
 
 392	return 1;
 393}
 394
 395void mark_info_dirty(struct super_block *sb, int type)
 396{
 397	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
 
 
 398}
 399EXPORT_SYMBOL(mark_info_dirty);
 400
 401/*
 402 *	Read dquot from disk and alloc space for it
 403 */
 404
 405int dquot_acquire(struct dquot *dquot)
 406{
 407	int ret = 0, ret2 = 0;
 408	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 409
 410	mutex_lock(&dquot->dq_lock);
 411	mutex_lock(&dqopt->dqio_mutex);
 412	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
 413		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
 414	if (ret < 0)
 415		goto out_iolock;
 
 
 416	set_bit(DQ_READ_B, &dquot->dq_flags);
 417	/* Instantiate dquot if needed */
 418	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
 419		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
 420		/* Write the info if needed */
 421		if (info_dirty(&dqopt->info[dquot->dq_type])) {
 422			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
 423						dquot->dq_sb, dquot->dq_type);
 424		}
 425		if (ret < 0)
 426			goto out_iolock;
 427		if (ret2 < 0) {
 428			ret = ret2;
 429			goto out_iolock;
 430		}
 431	}
 
 
 
 
 
 432	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 433out_iolock:
 434	mutex_unlock(&dqopt->dqio_mutex);
 435	mutex_unlock(&dquot->dq_lock);
 436	return ret;
 437}
 438EXPORT_SYMBOL(dquot_acquire);
 439
 440/*
 441 *	Write dquot to disk
 442 */
 443int dquot_commit(struct dquot *dquot)
 444{
 445	int ret = 0;
 446	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 447
 448	mutex_lock(&dqopt->dqio_mutex);
 449	spin_lock(&dq_list_lock);
 450	if (!clear_dquot_dirty(dquot)) {
 451		spin_unlock(&dq_list_lock);
 452		goto out_sem;
 453	}
 454	spin_unlock(&dq_list_lock);
 455	/* Inactive dquot can be only if there was error during read/init
 456	 * => we have better not writing it */
 457	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 458		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
 459	else
 460		ret = -EIO;
 461out_sem:
 462	mutex_unlock(&dqopt->dqio_mutex);
 463	return ret;
 464}
 465EXPORT_SYMBOL(dquot_commit);
 466
 467/*
 468 *	Release dquot
 469 */
 470int dquot_release(struct dquot *dquot)
 471{
 472	int ret = 0, ret2 = 0;
 473	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 474
 475	mutex_lock(&dquot->dq_lock);
 476	/* Check whether we are not racing with some other dqget() */
 477	if (atomic_read(&dquot->dq_count) > 1)
 478		goto out_dqlock;
 479	mutex_lock(&dqopt->dqio_mutex);
 480	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
 481		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
 482		/* Write the info */
 483		if (info_dirty(&dqopt->info[dquot->dq_type])) {
 484			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
 485						dquot->dq_sb, dquot->dq_type);
 486		}
 487		if (ret >= 0)
 488			ret = ret2;
 489	}
 490	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 491	mutex_unlock(&dqopt->dqio_mutex);
 492out_dqlock:
 493	mutex_unlock(&dquot->dq_lock);
 494	return ret;
 495}
 496EXPORT_SYMBOL(dquot_release);
 497
 498void dquot_destroy(struct dquot *dquot)
 499{
 500	kmem_cache_free(dquot_cachep, dquot);
 501}
 502EXPORT_SYMBOL(dquot_destroy);
 503
 504static inline void do_destroy_dquot(struct dquot *dquot)
 505{
 506	dquot->dq_sb->dq_op->destroy_dquot(dquot);
 507}
 508
 509/* Invalidate all dquots on the list. Note that this function is called after
 510 * quota is disabled and pointers from inodes removed so there cannot be new
 511 * quota users. There can still be some users of quotas due to inodes being
 512 * just deleted or pruned by prune_icache() (those are not attached to any
 513 * list) or parallel quotactl call. We have to wait for such users.
 514 */
 515static void invalidate_dquots(struct super_block *sb, int type)
 516{
 517	struct dquot *dquot, *tmp;
 518
 519restart:
 520	spin_lock(&dq_list_lock);
 521	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 522		if (dquot->dq_sb != sb)
 523			continue;
 524		if (dquot->dq_type != type)
 525			continue;
 526		/* Wait for dquot users */
 527		if (atomic_read(&dquot->dq_count)) {
 528			DEFINE_WAIT(wait);
 529
 530			atomic_inc(&dquot->dq_count);
 531			prepare_to_wait(&dquot->dq_wait_unused, &wait,
 532					TASK_UNINTERRUPTIBLE);
 533			spin_unlock(&dq_list_lock);
 534			/* Once dqput() wakes us up, we know it's time to free
 
 535			 * the dquot.
 536			 * IMPORTANT: we rely on the fact that there is always
 537			 * at most one process waiting for dquot to free.
 538			 * Otherwise dq_count would be > 1 and we would never
 539			 * wake up.
 540			 */
 541			if (atomic_read(&dquot->dq_count) > 1)
 542				schedule();
 543			finish_wait(&dquot->dq_wait_unused, &wait);
 544			dqput(dquot);
 545			/* At this moment dquot() need not exist (it could be
 546			 * reclaimed by prune_dqcache(). Hence we must
 547			 * restart. */
 548			goto restart;
 549		}
 550		/*
 551		 * Quota now has no users and it has been written on last
 552		 * dqput()
 553		 */
 554		remove_dquot_hash(dquot);
 555		remove_free_dquot(dquot);
 556		remove_inuse(dquot);
 557		do_destroy_dquot(dquot);
 558	}
 559	spin_unlock(&dq_list_lock);
 560}
 561
 562/* Call callback for every active dquot on given filesystem */
 563int dquot_scan_active(struct super_block *sb,
 564		      int (*fn)(struct dquot *dquot, unsigned long priv),
 565		      unsigned long priv)
 566{
 567	struct dquot *dquot, *old_dquot = NULL;
 568	int ret = 0;
 569
 570	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 
 571	spin_lock(&dq_list_lock);
 572	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
 573		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
 574			continue;
 575		if (dquot->dq_sb != sb)
 576			continue;
 577		/* Now we have active dquot so we can just increase use count */
 578		atomic_inc(&dquot->dq_count);
 579		spin_unlock(&dq_list_lock);
 580		dqstats_inc(DQST_LOOKUPS);
 581		dqput(old_dquot);
 582		old_dquot = dquot;
 583		ret = fn(dquot, priv);
 584		if (ret < 0)
 585			goto out;
 586		spin_lock(&dq_list_lock);
 587		/* We are safe to continue now because our dquot could not
 588		 * be moved out of the inuse list while we hold the reference */
 589	}
 590	spin_unlock(&dq_list_lock);
 591out:
 592	dqput(old_dquot);
 593	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 594	return ret;
 595}
 596EXPORT_SYMBOL(dquot_scan_active);
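/*
 * Illustrative sketch (not part of this file): a filesystem can use
 * dquot_scan_active() to run a callback over every active dquot, e.g. to
 * commit each one from its own sync path. The "myfs_" names below are
 * hypothetical; dquot_scan_active() holds a reference to the dquot for
 * the duration of the callback.
 */
static int myfs_commit_one_dquot(struct dquot *dquot, unsigned long priv)
{
	/* priv is passed through unchanged from the caller below */
	return dquot->dq_sb->dq_op->write_dquot(dquot);
}

static int myfs_sync_all_dquots(struct super_block *sb)
{
	return dquot_scan_active(sb, myfs_commit_one_dquot, 0);
}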
 597
 598int dquot_quota_sync(struct super_block *sb, int type, int wait)
 599{
 600	struct list_head *dirty;
 601	struct dquot *dquot;
 602	struct quota_info *dqopt = sb_dqopt(sb);
 603	int cnt;
 604
 605	mutex_lock(&dqopt->dqonoff_mutex);
 606	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 607		if (type != -1 && cnt != type)
 608			continue;
 609		if (!sb_has_quota_active(sb, cnt))
 610			continue;
 611		spin_lock(&dq_list_lock);
 612		dirty = &dqopt->info[cnt].dqi_dirty_list;
 613		while (!list_empty(dirty)) {
 614			dquot = list_first_entry(dirty, struct dquot,
 615						 dq_dirty);
 616			/* A dquot that is dirty yet inactive can only be a bad one... */
 617			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 618				clear_dquot_dirty(dquot);
 619				continue;
 620			}
 621			/* Now we have an active dquot to which someone is
 622			 * holding a reference, so we can safely just increase
 623			 * the use count */
 624			atomic_inc(&dquot->dq_count);
 625			spin_unlock(&dq_list_lock);
 626			dqstats_inc(DQST_LOOKUPS);
 627			sb->dq_op->write_dquot(dquot);
 628			dqput(dquot);
 629			spin_lock(&dq_list_lock);
 630		}
 631		spin_unlock(&dq_list_lock);
 632	}
 633
 634	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 635		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
 636		    && info_dirty(&dqopt->info[cnt]))
 637			sb->dq_op->write_info(sb, cnt);
 638	dqstats_inc(DQST_SYNCS);
 639	mutex_unlock(&dqopt->dqonoff_mutex);
 640
 641	if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
 642		return 0;
 643
 644	/* This is not very clever (nor fast) but currently I don't know of
 645	 * any other simple way of getting quota data to disk, and we must get
 646	 * it there for it to be visible to userspace... */
 647	if (sb->s_op->sync_fs)
 648		sb->s_op->sync_fs(sb, 1);
 649	sync_blockdev(sb->s_bdev);
 650
 651	/*
 652	 * Now that everything is written we can discard the pagecache so
 653	 * that userspace sees the changes.
 654	 */
 655	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 656	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 657		if (type != -1 && cnt != type)
 658			continue;
 659		if (!sb_has_quota_active(sb, cnt))
 660			continue;
 661		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
 662				  I_MUTEX_QUOTA);
 663		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
 664		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 665	}
 666	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 667
 668	return 0;
 669}
 670EXPORT_SYMBOL(dquot_quota_sync);
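/*
 * Illustrative sketch (not part of this file): dquot_quota_sync() and the
 * other generic helpers are meant to be plugged into a filesystem's
 * quotactl operations table; "myfs_quotactl_ops" is a hypothetical name
 * for the kind of table a non-journalling filesystem would use.
 */
static const struct quotactl_ops myfs_quotactl_ops = {
	.quota_on	= dquot_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
};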
 671
 672/* Free unused dquots from cache */
 673static void prune_dqcache(int count)
 674{
 675	struct list_head *head;
 676	struct dquot *dquot;
 677
 678	head = free_dquots.prev;
 679	while (head != &free_dquots && count) {
 680		dquot = list_entry(head, struct dquot, dq_free);
 681		remove_dquot_hash(dquot);
 682		remove_free_dquot(dquot);
 683		remove_inuse(dquot);
 684		do_destroy_dquot(dquot);
 685		count--;
 686		head = free_dquots.prev;
 687	}
 688}
 689
 690/*
 691 * This is called from kswapd when we think we need some
 692 * more memory
 693 */
 694static int shrink_dqcache_memory(struct shrinker *shrink,
 695				 struct shrink_control *sc)
 696{
 697	int nr = sc->nr_to_scan;
 698
 699	if (nr) {
 700		spin_lock(&dq_list_lock);
 701		prune_dqcache(nr);
 702		spin_unlock(&dq_list_lock);
 703	}
 704	return ((unsigned)
 705		percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
 706		/100) * sysctl_vfs_cache_pressure;
 707}
 708
 709static struct shrinker dqcache_shrinker = {
 710	.shrink = shrink_dqcache_memory,
 711	.seeks = DEFAULT_SEEKS,
 712};
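/*
 * The shrinker above only takes effect once registered with the VM. A
 * minimal sketch, assuming it is done from quota initialization (the real
 * call site lies outside this excerpt):
 */
static int __init example_register_dqcache_shrinker(void)
{
	register_shrinker(&dqcache_shrinker);	/* hook into memory reclaim */
	return 0;
}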
 713
 714/*
 715 * Put reference to dquot
 716 * NOTE: If you change this function please check whether dqput_blocks() works right...
 717 */
 718void dqput(struct dquot *dquot)
 719{
 720	int ret;
 721
 722	if (!dquot)
 723		return;
 724#ifdef CONFIG_QUOTA_DEBUG
 725	if (!atomic_read(&dquot->dq_count)) {
 726		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
 727			    quotatypes[dquot->dq_type], dquot->dq_id);
 728		BUG();
 729	}
 730#endif
 731	dqstats_inc(DQST_DROPS);
 732we_slept:
 733	spin_lock(&dq_list_lock);
 734	if (atomic_read(&dquot->dq_count) > 1) {
 735		/* We have more than one user... nothing to do */
 736		atomic_dec(&dquot->dq_count);
 737		/* Releasing dquot during quotaoff phase? */
 738		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
 739		    atomic_read(&dquot->dq_count) == 1)
 740			wake_up(&dquot->dq_wait_unused);
 741		spin_unlock(&dq_list_lock);
 742		return;
 743	}
 744	/* Need to release dquot? */
 745	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
 746		spin_unlock(&dq_list_lock);
 747		/* Commit dquot before releasing */
 748		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
 749		if (ret < 0) {
 750			quota_error(dquot->dq_sb, "Can't write quota structure"
 751				    " (error %d). Quota may get out of sync!",
 752				    ret);
 753			/*
 754			 * We clear dirty bit anyway, so that we avoid
 755			 * infinite loop here
 756			 */
 757			spin_lock(&dq_list_lock);
 758			clear_dquot_dirty(dquot);
 759			spin_unlock(&dq_list_lock);
 760		}
 761		goto we_slept;
 762	}
 763	/* Clear flag in case dquot was inactive (something bad happened) */
 764	clear_dquot_dirty(dquot);
 765	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 766		spin_unlock(&dq_list_lock);
 767		dquot->dq_sb->dq_op->release_dquot(dquot);
 768		goto we_slept;
 769	}
 770	atomic_dec(&dquot->dq_count);
 771#ifdef CONFIG_QUOTA_DEBUG
 772	/* sanity check */
 773	BUG_ON(!list_empty(&dquot->dq_free));
 774#endif
 775	put_dquot_last(dquot);
 776	spin_unlock(&dq_list_lock);
 777}
 778EXPORT_SYMBOL(dqput);
 779
 780struct dquot *dquot_alloc(struct super_block *sb, int type)
 781{
 782	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
 783}
 784EXPORT_SYMBOL(dquot_alloc);
 785
 786static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 787{
 788	struct dquot *dquot;
 789
 790	dquot = sb->dq_op->alloc_dquot(sb, type);
 791	if (!dquot)
 792		return NULL;
 793
 794	mutex_init(&dquot->dq_lock);
 795	INIT_LIST_HEAD(&dquot->dq_free);
 796	INIT_LIST_HEAD(&dquot->dq_inuse);
 797	INIT_HLIST_NODE(&dquot->dq_hash);
 798	INIT_LIST_HEAD(&dquot->dq_dirty);
 799	init_waitqueue_head(&dquot->dq_wait_unused);
 800	dquot->dq_sb = sb;
 801	dquot->dq_type = type;
 802	atomic_set(&dquot->dq_count, 1);
 803
 804	return dquot;
 805}
 806
 807/*
 808 * Get reference to dquot
 809 *
 810 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 811 * destroying our dquot by:
 812 *   a) checking for quota flags under dq_list_lock and
 813 *   b) getting a reference to dquot before we release dq_list_lock
 814 */
 815struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 816{
 817	unsigned int hashent = hashfn(sb, id, type);
 818	struct dquot *dquot = NULL, *empty = NULL;
 819
 820	if (!sb_has_quota_active(sb, type))
 821		return NULL;
 822we_slept:
 823	spin_lock(&dq_list_lock);
 824	spin_lock(&dq_state_lock);
 825	if (!sb_has_quota_active(sb, type)) {
 826		spin_unlock(&dq_state_lock);
 827		spin_unlock(&dq_list_lock);
 828		goto out;
 829	}
 830	spin_unlock(&dq_state_lock);
 831
 832	dquot = find_dquot(hashent, sb, id, type);
 833	if (!dquot) {
 834		if (!empty) {
 835			spin_unlock(&dq_list_lock);
 836			empty = get_empty_dquot(sb, type);
 837			if (!empty)
 838				schedule();	/* Try to wait for a moment... */
 839			goto we_slept;
 840		}
 841		dquot = empty;
 842		empty = NULL;
 843		dquot->dq_id = id;
 844		/* all dquots go on the inuse_list */
 845		put_inuse(dquot);
 846		/* hash it first so it can be found */
 847		insert_dquot_hash(dquot);
 848		spin_unlock(&dq_list_lock);
 849		dqstats_inc(DQST_LOOKUPS);
 850	} else {
 851		if (!atomic_read(&dquot->dq_count))
 852			remove_free_dquot(dquot);
 853		atomic_inc(&dquot->dq_count);
 854		spin_unlock(&dq_list_lock);
 855		dqstats_inc(DQST_CACHE_HITS);
 856		dqstats_inc(DQST_LOOKUPS);
 857	}
 858	/* Wait for dq_lock - after this we know that either dquot_release() has
 859	 * already finished or it will be canceled due to the dq_count > 1 test */
 860	wait_on_dquot(dquot);
 861	/* Read the dquot / allocate space in quota file */
 862	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
 863	    sb->dq_op->acquire_dquot(dquot) < 0) {
 864		dqput(dquot);
 865		dquot = NULL;
 866		goto out;
 867	}
 868#ifdef CONFIG_QUOTA_DEBUG
 869	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
 870#endif
 871out:
 872	if (empty)
 873		do_destroy_dquot(empty);
 874
 875	return dquot;
 876}
 877EXPORT_SYMBOL(dqget);
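/*
 * Illustrative sketch (not part of this file): the usual calling pattern
 * pairs dqget() with dqput(); the reference pins the dquot so that it
 * cannot be invalidated or reclaimed underneath the caller. The helper
 * name is hypothetical.
 */
static void example_touch_quota(struct super_block *sb, unsigned int id)
{
	struct dquot *dquot = dqget(sb, id, USRQUOTA);

	if (!dquot)
		return;		/* quota not active, or allocation failed */
	/* ... inspect or modify dquot->dq_dqb under dq_data_lock ... */
	dqput(dquot);		/* may write the dquot back if it went dirty */
}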
 878
 879static int dqinit_needed(struct inode *inode, int type)
 880{
 881	int cnt;
 882
 883	if (IS_NOQUOTA(inode))
 884		return 0;
 885	if (type != -1)
 886		return !inode->i_dquot[type];
 887	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 888		if (!inode->i_dquot[cnt])
 889			return 1;
 890	return 0;
 891}
 892
 893	/* This routine is guarded by dqonoff_mutex */
 894static void add_dquot_ref(struct super_block *sb, int type)
 895{
 896	struct inode *inode, *old_inode = NULL;
 897#ifdef CONFIG_QUOTA_DEBUG
 898	int reserved = 0;
 899#endif
 900
 901	spin_lock(&inode_sb_list_lock);
 902	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 903		spin_lock(&inode->i_lock);
 904		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
 905		    !atomic_read(&inode->i_writecount) ||
 906		    !dqinit_needed(inode, type)) {
 907			spin_unlock(&inode->i_lock);
 908			continue;
 909		}
 910#ifdef CONFIG_QUOTA_DEBUG
 911		if (unlikely(inode_get_rsv_space(inode) > 0))
 912			reserved = 1;
 913#endif
 914		__iget(inode);
 915		spin_unlock(&inode->i_lock);
 916		spin_unlock(&inode_sb_list_lock);
 917
 918		iput(old_inode);
 919		__dquot_initialize(inode, type);
 920
 921		/*
 922		 * We hold a reference to 'inode' so it couldn't have been
 923		 * removed from s_inodes list while we dropped the
 924		 * inode_sb_list_lock. We cannot iput the inode now as we can be
 925		 * holding the last reference and we cannot iput it under
 926		 * inode_sb_list_lock. So we keep the reference and iput it
 927		 * later.
 928		 */
 929		old_inode = inode;
 930		spin_lock(&inode_sb_list_lock);
 931	}
 932	spin_unlock(&inode_sb_list_lock);
 933	iput(old_inode);
 934
 935#ifdef CONFIG_QUOTA_DEBUG
 936	if (reserved) {
 937		quota_error(sb, "Writes happened before quota was turned on "
 938			"thus quota information is probably inconsistent. "
 939			"Please run quotacheck(8)");
 940	}
 941#endif
 942}
 943
 944/*
 945 * Return 0 if dqput() won't block.
 946 * (note that 1 doesn't necessarily mean blocking)
 947 */
 948static inline int dqput_blocks(struct dquot *dquot)
 949{
 950	if (atomic_read(&dquot->dq_count) <= 1)
 951		return 1;
 952	return 0;
 953}
 954
 955/*
 956 * Remove references to dquots from inode and add dquot to list for freeing
 957 * if we have the last reference to dquot
 958 * We can't race with anybody because we hold dqptr_sem for writing...
 959 */
 960static int remove_inode_dquot_ref(struct inode *inode, int type,
 961				  struct list_head *tofree_head)
 962{
 963	struct dquot *dquot = inode->i_dquot[type];
 964
 965	inode->i_dquot[type] = NULL;
 966	if (dquot) {
 967		if (dqput_blocks(dquot)) {
 968#ifdef CONFIG_QUOTA_DEBUG
 969			if (atomic_read(&dquot->dq_count) != 1)
 970				quota_error(inode->i_sb, "Adding dquot with "
 971					    "dq_count %d to dispose list",
 972					    atomic_read(&dquot->dq_count));
 973#endif
 974			spin_lock(&dq_list_lock);
 975			/* As the dquot currently must have users it can't be on
 976			 * the free list... */
 977			list_add(&dquot->dq_free, tofree_head);
 978			spin_unlock(&dq_list_lock);
 979			return 1;
 980		}
 981		else
 982			dqput(dquot);   /* We have guaranteed we won't block */
 983	}
 984	return 0;
 985}
 986
 987/*
 988 * Free list of dquots
 989	 * Dquots are removed from inodes and no new references can be taken, so
 990	 * we are the only ones holding a reference
 991 */
 992static void put_dquot_list(struct list_head *tofree_head)
 993{
 994	struct list_head *act_head;
 995	struct dquot *dquot;
 996
 997	act_head = tofree_head->next;
 998	while (act_head != tofree_head) {
 999		dquot = list_entry(act_head, struct dquot, dq_free);
1000		act_head = act_head->next;
1001		/* Remove dquot from the list so we won't have problems... */
1002		list_del_init(&dquot->dq_free);
1003		dqput(dquot);
1004	}
1005}
1006
1007static void remove_dquot_ref(struct super_block *sb, int type,
1008		struct list_head *tofree_head)
1009{
1010	struct inode *inode;
1011	int reserved = 0;
1012
1013	spin_lock(&inode_sb_list_lock);
1014	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1015		/*
1016		 * We also have to scan I_NEW inodes because they can already
1017		 * have their quota pointer initialized. Luckily, we need to
1018		 * touch only the quota pointers and these have separate locking
1019		 * (dqptr_sem).
1020		 */
1021		if (!IS_NOQUOTA(inode)) {
1022			if (unlikely(inode_get_rsv_space(inode) > 0))
1023				reserved = 1;
1024			remove_inode_dquot_ref(inode, type, tofree_head);
1025		}
1026	}
1027	spin_unlock(&inode_sb_list_lock);
1028#ifdef CONFIG_QUOTA_DEBUG
1029	if (reserved) {
1030		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1031			" was disabled thus quota information is probably "
1032			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
1033	}
1034#endif
1035}
1036
1037/* Gather all references from inodes and drop them */
1038static void drop_dquot_ref(struct super_block *sb, int type)
1039{
1040	LIST_HEAD(tofree_head);
1041
1042	if (sb->dq_op) {
1043		down_write(&sb_dqopt(sb)->dqptr_sem);
1044		remove_dquot_ref(sb, type, &tofree_head);
1045		up_write(&sb_dqopt(sb)->dqptr_sem);
1046		put_dquot_list(&tofree_head);
1047	}
1048}
1049
1050static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
1051{
1052	dquot->dq_dqb.dqb_curinodes += number;
1053}
1054
1055static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
1056{
1057	dquot->dq_dqb.dqb_curspace += number;
1058}
1059
1060static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
1061{
1062	dquot->dq_dqb.dqb_rsvspace += number;
1063}
1064
1065/*
1066 * Claim reserved quota space
1067 */
1068static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
1069{
1070	if (dquot->dq_dqb.dqb_rsvspace < number) {
1071		WARN_ON_ONCE(1);
1072		number = dquot->dq_dqb.dqb_rsvspace;
1073	}
1074	dquot->dq_dqb.dqb_curspace += number;
1075	dquot->dq_dqb.dqb_rsvspace -= number;
1076}
1077
1078static inline
1079void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1080{
1081	if (dquot->dq_dqb.dqb_rsvspace >= number)
1082		dquot->dq_dqb.dqb_rsvspace -= number;
1083	else {
1084		WARN_ON_ONCE(1);
1085		dquot->dq_dqb.dqb_rsvspace = 0;
1086	}
1087}
1088
1089static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1090{
1091	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1092	    dquot->dq_dqb.dqb_curinodes >= number)
1093		dquot->dq_dqb.dqb_curinodes -= number;
1094	else
1095		dquot->dq_dqb.dqb_curinodes = 0;
1096	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1097		dquot->dq_dqb.dqb_itime = (time_t) 0;
1098	clear_bit(DQ_INODES_B, &dquot->dq_flags);
1099}
1100
1101static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1102{
1103	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1104	    dquot->dq_dqb.dqb_curspace >= number)
1105		dquot->dq_dqb.dqb_curspace -= number;
1106	else
1107		dquot->dq_dqb.dqb_curspace = 0;
1108	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1109		dquot->dq_dqb.dqb_btime = (time_t) 0;
1110	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1111}
1112
1113static int warning_issued(struct dquot *dquot, const int warntype)
1114{
1115	int flag = (warntype == QUOTA_NL_BHARDWARN ||
1116		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1117		((warntype == QUOTA_NL_IHARDWARN ||
1118		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1119
1120	if (!flag)
1121		return 0;
1122	return test_and_set_bit(flag, &dquot->dq_flags);
1123}
1124
1125#ifdef CONFIG_PRINT_QUOTA_WARNING
1126static int flag_print_warnings = 1;
1127
1128static int need_print_warning(struct dquot *dquot)
1129{
1130	if (!flag_print_warnings)
1131		return 0;
1132
1133	switch (dquot->dq_type) {
1134		case USRQUOTA:
1135			return current_fsuid() == dquot->dq_id;
1136		case GRPQUOTA:
1137			return in_group_p(dquot->dq_id);
1138	}
1139	return 0;
1140}
1141
1142/* Print warning to user which exceeded quota */
1143static void print_warning(struct dquot *dquot, const int warntype)
1144{
1145	char *msg = NULL;
1146	struct tty_struct *tty;
1147
1148	if (warntype == QUOTA_NL_IHARDBELOW ||
1149	    warntype == QUOTA_NL_ISOFTBELOW ||
1150	    warntype == QUOTA_NL_BHARDBELOW ||
1151	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
1152		return;
1153
1154	tty = get_current_tty();
1155	if (!tty)
1156		return;
1157	tty_write_message(tty, dquot->dq_sb->s_id);
1158	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1159		tty_write_message(tty, ": warning, ");
1160	else
1161		tty_write_message(tty, ": write failed, ");
1162	tty_write_message(tty, quotatypes[dquot->dq_type]);
1163	switch (warntype) {
1164		case QUOTA_NL_IHARDWARN:
1165			msg = " file limit reached.\r\n";
1166			break;
1167		case QUOTA_NL_ISOFTLONGWARN:
1168			msg = " file quota exceeded too long.\r\n";
1169			break;
1170		case QUOTA_NL_ISOFTWARN:
1171			msg = " file quota exceeded.\r\n";
1172			break;
1173		case QUOTA_NL_BHARDWARN:
1174			msg = " block limit reached.\r\n";
1175			break;
1176		case QUOTA_NL_BSOFTLONGWARN:
1177			msg = " block quota exceeded too long.\r\n";
1178			break;
1179		case QUOTA_NL_BSOFTWARN:
1180			msg = " block quota exceeded.\r\n";
1181			break;
1182	}
1183	tty_write_message(tty, msg);
1184	tty_kref_put(tty);
1185}
1186#endif
1187
1188/*
1189 * Write warnings to the console and send warning messages over netlink.
1190 *
1191 * Note that this function can sleep.
1192 */
1193static void flush_warnings(struct dquot *const *dquots, char *warntype)
1194{
1195	struct dquot *dq;
1196	int i;
1197
1198	for (i = 0; i < MAXQUOTAS; i++) {
1199		dq = dquots[i];
1200		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1201		    !warning_issued(dq, warntype[i])) {
1202#ifdef CONFIG_PRINT_QUOTA_WARNING
1203			print_warning(dq, warntype[i]);
1204#endif
1205			quota_send_warning(dq->dq_type, dq->dq_id,
1206					   dq->dq_sb->s_dev, warntype[i]);
1207		}
1208	}
1209}
1210
1211static int ignore_hardlimit(struct dquot *dquot)
1212{
1213	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1214
1215	return capable(CAP_SYS_RESOURCE) &&
1216	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1217		!(info->dqi_flags & V1_DQF_RSQUASH));
1218}
1219
1220/* needs dq_data_lock */
1221static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1222{
1223	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1224
1225	*warntype = QUOTA_NL_NOWARN;
1226	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
1227	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
1228		return 0;
1229
1230	if (dquot->dq_dqb.dqb_ihardlimit &&
1231	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1232	    !ignore_hardlimit(dquot)) {
1233		*warntype = QUOTA_NL_IHARDWARN;
1234		return -EDQUOT;
1235	}
1236
1237	if (dquot->dq_dqb.dqb_isoftlimit &&
1238	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1239	    dquot->dq_dqb.dqb_itime &&
1240	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
1241	    !ignore_hardlimit(dquot)) {
1242		*warntype = QUOTA_NL_ISOFTLONGWARN;
1243		return -EDQUOT;
1244	}
1245
1246	if (dquot->dq_dqb.dqb_isoftlimit &&
1247	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1248	    dquot->dq_dqb.dqb_itime == 0) {
1249		*warntype = QUOTA_NL_ISOFTWARN;
1250		dquot->dq_dqb.dqb_itime = get_seconds() +
1251		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1252	}
1253
1254	return 0;
1255}
1256
1257/* needs dq_data_lock */
1258static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
1259{
1260	qsize_t tspace;
1261	struct super_block *sb = dquot->dq_sb;
1262
1263	*warntype = QUOTA_NL_NOWARN;
1264	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
1265	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
1266		return 0;
1267
1268	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1269		+ space;
1270
1271	if (dquot->dq_dqb.dqb_bhardlimit &&
1272	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
1273	    !ignore_hardlimit(dquot)) {
1274		if (!prealloc)
1275			*warntype = QUOTA_NL_BHARDWARN;
1276		return -EDQUOT;
1277	}
1278
1279	if (dquot->dq_dqb.dqb_bsoftlimit &&
1280	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1281	    dquot->dq_dqb.dqb_btime &&
1282	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
1283	    !ignore_hardlimit(dquot)) {
1284		if (!prealloc)
1285			*warntype = QUOTA_NL_BSOFTLONGWARN;
1286		return -EDQUOT;
1287	}
1288
1289	if (dquot->dq_dqb.dqb_bsoftlimit &&
1290	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1291	    dquot->dq_dqb.dqb_btime == 0) {
1292		if (!prealloc) {
1293			*warntype = QUOTA_NL_BSOFTWARN;
1294			dquot->dq_dqb.dqb_btime = get_seconds() +
1295			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
1296		}
1297		else
1298			/*
1299			 * We don't allow preallocation to exceed the softlimit,
1300			 * so exceeding it will always be reported
1301			 */
1302			return -EDQUOT;
1303	}
1304
1305	return 0;
1306}
1307
1308static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1309{
1310	qsize_t newinodes;
1311
1312	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1313	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1314	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
1315		return QUOTA_NL_NOWARN;
1316
1317	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1318	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1319		return QUOTA_NL_ISOFTBELOW;
1320	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1321	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
1322		return QUOTA_NL_IHARDBELOW;
1323	return QUOTA_NL_NOWARN;
1324}
1325
1326static int info_bdq_free(struct dquot *dquot, qsize_t space)
1327{
1328	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1329	    dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1330		return QUOTA_NL_NOWARN;
1331
1332	if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1333		return QUOTA_NL_BSOFTBELOW;
1334	if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1335	    dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1336		return QUOTA_NL_BHARDBELOW;
1337	return QUOTA_NL_NOWARN;
1338}
1339
1340static int dquot_active(const struct inode *inode)
1341{
1342	struct super_block *sb = inode->i_sb;
1343
1344	if (IS_NOQUOTA(inode))
1345		return 0;
1346	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1347}
1348
1349/*
1350 * Initialize quota pointers in inode
1351 *
1352 * We do things in a bit complicated way but by that we avoid calling
1353 * dqget() and thus filesystem callbacks under dqptr_sem.
1354 *
1355 * It is better to call this function outside of any transaction as it
1356 * might need a lot of space in journal for dquot structure allocation.
1357 */
1358static void __dquot_initialize(struct inode *inode, int type)
1359{
1360	unsigned int id = 0;
1361	int cnt;
1362	struct dquot *got[MAXQUOTAS];
1363	struct super_block *sb = inode->i_sb;
1364	qsize_t rsv;
1365
1366	/* First test before acquiring mutex - solves deadlocks when we
1367	 * re-enter the quota code and are already holding the mutex */
1368	if (!dquot_active(inode))
1369		return;
1370
1371	/* First get references to structures we might need. */
1372	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1373		got[cnt] = NULL;
1374		if (type != -1 && cnt != type)
1375			continue;
1376		switch (cnt) {
1377		case USRQUOTA:
1378			id = inode->i_uid;
1379			break;
1380		case GRPQUOTA:
1381			id = inode->i_gid;
1382			break;
1383		}
1384		got[cnt] = dqget(sb, id, cnt);
1385	}
1386
1387	down_write(&sb_dqopt(sb)->dqptr_sem);
1388	if (IS_NOQUOTA(inode))
1389		goto out_err;
1390	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1391		if (type != -1 && cnt != type)
1392			continue;
1393		/* Avoid races with quotaoff() */
1394		if (!sb_has_quota_active(sb, cnt))
1395			continue;
1396		/* We could race with quotaon or dqget() could have failed */
1397		if (!got[cnt])
1398			continue;
1399		if (!inode->i_dquot[cnt]) {
1400			inode->i_dquot[cnt] = got[cnt];
1401			got[cnt] = NULL;
1402			/*
1403			 * Make quota reservation system happy if someone
1404			 * did a write before quota was turned on
1405			 */
1406			rsv = inode_get_rsv_space(inode);
1407			if (unlikely(rsv))
1408				dquot_resv_space(inode->i_dquot[cnt], rsv);
1409		}
1410	}
1411out_err:
1412	up_write(&sb_dqopt(sb)->dqptr_sem);
1413	/* Drop unused references */
1414	dqput_all(got);
1415}
1416
1417void dquot_initialize(struct inode *inode)
1418{
1419	__dquot_initialize(inode, -1);
1420}
1421EXPORT_SYMBOL(dquot_initialize);
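/*
 * Illustrative sketch (not part of this file): filesystems call
 * dquot_initialize() at the start of operations that will charge or
 * uncharge quota, outside of any transaction, so dquot allocation does
 * not happen with a journal handle held. "myfs_unlink" is hypothetical.
 */
static int myfs_unlink(struct inode *dir, struct dentry *dentry)
{
	dquot_initialize(dir);			/* dir may lose blocks */
	dquot_initialize(dentry->d_inode);	/* victim will lose an inode */
	/* ... the real unlink work follows ... */
	return 0;
}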
1422
1423/*
1424 * 	Release all quotas referenced by inode
1425 */
1426static void __dquot_drop(struct inode *inode)
1427{
1428	int cnt;
1429	struct dquot *put[MAXQUOTAS];
1430
1431	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1432	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1433		put[cnt] = inode->i_dquot[cnt];
1434		inode->i_dquot[cnt] = NULL;
1435	}
1436	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1437	dqput_all(put);
1438}
1439
1440void dquot_drop(struct inode *inode)
1441{
1442	int cnt;
1443
1444	if (IS_NOQUOTA(inode))
1445		return;
1446
1447	/*
1448	 * Test before calling to rule out calls from proc and such
1449	 * where we are not allowed to block. Note that this is
1450	 * actually a reliable test even without the lock - the caller
1451	 * must ensure that nobody can come after the DQUOT_DROP and
1452	 * add quota pointers back anyway.
1453	 */
1454	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1455		if (inode->i_dquot[cnt])
1456			break;
1457	}
1458
1459	if (cnt < MAXQUOTAS)
1460		__dquot_drop(inode);
1461}
1462EXPORT_SYMBOL(dquot_drop);
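/*
 * Illustrative sketch (not part of this file): dquot_drop() is typically
 * called near the end of a filesystem's ->evict_inode(), once the inode is
 * dead, so the i_dquot[] references do not outlive it. "myfs" is
 * hypothetical.
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	/* ... free on-disk data while quota is still attached ... */
	end_writeback(inode);	/* inode is now clean and unhashed */
	dquot_drop(inode);	/* drop the i_dquot[] references */
}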
1463
1464/*
1465 * inode_reserved_space is managed internally by quota, and protected by
1466	 * i_lock, similarly to i_blocks+i_bytes.
1467 */
1468static qsize_t *inode_reserved_space(struct inode * inode)
1469{
1470	/* Filesystem must explicitly define its own method in order to use
1471	 * the quota reservation interface */
1472	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1473	return inode->i_sb->dq_op->get_reserved_space(inode);
1474}
1475
1476void inode_add_rsv_space(struct inode *inode, qsize_t number)
1477{
1478	spin_lock(&inode->i_lock);
1479	*inode_reserved_space(inode) += number;
1480	spin_unlock(&inode->i_lock);
1481}
1482EXPORT_SYMBOL(inode_add_rsv_space);
1483
1484void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1485{
1486	spin_lock(&inode->i_lock);
1487	*inode_reserved_space(inode) -= number;
1488	__inode_add_bytes(inode, number);
1489	spin_unlock(&inode->i_lock);
1490}
1491EXPORT_SYMBOL(inode_claim_rsv_space);
1492
1493void inode_sub_rsv_space(struct inode *inode, qsize_t number)
1494{
1495	spin_lock(&inode->i_lock);
1496	*inode_reserved_space(inode) -= number;
1497	spin_unlock(&inode->i_lock);
1498}
1499EXPORT_SYMBOL(inode_sub_rsv_space);
1500
1501static qsize_t inode_get_rsv_space(struct inode *inode)
1502{
1503	qsize_t ret;
1504
1505	if (!inode->i_sb->dq_op->get_reserved_space)
1506		return 0;
1507	spin_lock(&inode->i_lock);
1508	ret = *inode_reserved_space(inode);
1509	spin_unlock(&inode->i_lock);
1510	return ret;
1511}
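/*
 * Illustrative sketch (not part of this file): a filesystem that wants the
 * reservation interface supplies ->get_reserved_space, returning a pointer
 * to a qsize_t it keeps in its in-core inode. The "myfs_inode" container
 * and its field are hypothetical.
 */
struct myfs_inode {
	qsize_t i_reserved_quota;	/* protected by vfs_inode.i_lock */
	struct inode vfs_inode;
};

static qsize_t *myfs_get_reserved_space(struct inode *inode)
{
	return &container_of(inode, struct myfs_inode,
			     vfs_inode)->i_reserved_quota;
}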
1512
1513static void inode_incr_space(struct inode *inode, qsize_t number,
1514				int reserve)
1515{
1516	if (reserve)
1517		inode_add_rsv_space(inode, number);
1518	else
1519		inode_add_bytes(inode, number);
1520}
1521
1522static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1523{
1524	if (reserve)
1525		inode_sub_rsv_space(inode, number);
1526	else
1527		inode_sub_bytes(inode, number);
1528}
1529
1530/*
1531	 * This function updates the i_blocks+i_bytes fields and quota information
1532 * (together with appropriate checks).
1533 *
1534 * NOTE: We absolutely rely on the fact that caller dirties the inode
1535 * (usually helpers in quotaops.h care about this) and holds a handle for
1536 * the current transaction so that dquot write and inode write go into the
1537 * same transaction.
1538 */
1539
1540/*
1541 * This operation can block, but only after everything is updated
1542 */
1543int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1544{
1545	int cnt, ret = 0;
1546	char warntype[MAXQUOTAS];
1547	int warn = flags & DQUOT_SPACE_WARN;
1548	int reserve = flags & DQUOT_SPACE_RESERVE;
1549	int nofail = flags & DQUOT_SPACE_NOFAIL;
1550
1551	/*
1552	 * First test before acquiring mutex - solves deadlocks when we
1553	 * re-enter the quota code and are already holding the mutex
1554	 */
1555	if (!dquot_active(inode)) {
1556		inode_incr_space(inode, number, reserve);
1557		goto out;
1558	}
1559
1560	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1561	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1562		warntype[cnt] = QUOTA_NL_NOWARN;
1563
1564	spin_lock(&dq_data_lock);
1565	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1566		if (!inode->i_dquot[cnt])
1567			continue;
1568		ret = check_bdq(inode->i_dquot[cnt], number, !warn,
1569				warntype+cnt);
1570		if (ret && !nofail) {
1571			spin_unlock(&dq_data_lock);
1572			goto out_flush_warn;
1573		}
1574	}
1575	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1576		if (!inode->i_dquot[cnt])
1577			continue;
1578		if (reserve)
1579			dquot_resv_space(inode->i_dquot[cnt], number);
1580		else
1581			dquot_incr_space(inode->i_dquot[cnt], number);
1582	}
1583	inode_incr_space(inode, number, reserve);
1584	spin_unlock(&dq_data_lock);
1585
1586	if (reserve)
1587		goto out_flush_warn;
1588	mark_all_dquot_dirty(inode->i_dquot);
1589out_flush_warn:
1590	flush_warnings(inode->i_dquot, warntype);
1591	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1592out:
1593	return ret;
1594}
1595EXPORT_SYMBOL(__dquot_alloc_space);
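/*
 * Callers normally reach __dquot_alloc_space() through thin wrappers that
 * choose the flags; a sketch roughly in the spirit of the helpers in
 * <linux/quotaops.h> (the "example_" names are hypothetical):
 */
static inline int example_alloc_space(struct inode *inode, qsize_t nr)
{
	/* warn the user and account immediately (no reservation) */
	return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN);
}

static inline int example_reserve_space(struct inode *inode, qsize_t nr)
{
	/* only reserve: i_blocks stays untouched until the claim */
	return __dquot_alloc_space(inode, nr,
				   DQUOT_SPACE_WARN | DQUOT_SPACE_RESERVE);
}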
1596
1597/*
1598 * This operation can block, but only after everything is updated
1599 */
1600int dquot_alloc_inode(const struct inode *inode)
1601{
1602	int cnt, ret = 0;
1603	char warntype[MAXQUOTAS];
1604
1605	/* First test before acquiring mutex - solves deadlocks when we
1606	 * re-enter the quota code and are already holding the mutex */
1607	if (!dquot_active(inode))
1608		return 0;
1609	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1610		warntype[cnt] = QUOTA_NL_NOWARN;
1611	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1612	spin_lock(&dq_data_lock);
1613	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1614		if (!inode->i_dquot[cnt])
1615			continue;
1616		ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
1617		if (ret)
1618			goto warn_put_all;
1619	}
1620
1621	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1622		if (!inode->i_dquot[cnt])
1623			continue;
1624		dquot_incr_inodes(inode->i_dquot[cnt], 1);
1625	}
1626
1627warn_put_all:
1628	spin_unlock(&dq_data_lock);
1629	if (ret == 0)
1630		mark_all_dquot_dirty(inode->i_dquot);
1631	flush_warnings(inode->i_dquot, warntype);
1632	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1633	return ret;
1634}
1635EXPORT_SYMBOL(dquot_alloc_inode);
1636
1637/*
1638 * Convert in-memory reserved quotas to real consumed quotas
1639 */
1640int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1641{
1642	int cnt;
1643
1644	if (!dquot_active(inode)) {
1645		inode_claim_rsv_space(inode, number);
1646		return 0;
1647	}
1648
1649	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1650	spin_lock(&dq_data_lock);
1651	/* Claim reserved quotas to allocated quotas */
1652	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1653		if (inode->i_dquot[cnt])
1654			dquot_claim_reserved_space(inode->i_dquot[cnt],
1655							number);
1656	}
1657	/* Update inode bytes */
1658	inode_claim_rsv_space(inode, number);
1659	spin_unlock(&dq_data_lock);
1660	mark_all_dquot_dirty(inode->i_dquot);
1661	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1662	return 0;
1663}
1664EXPORT_SYMBOL(dquot_claim_space_nodirty);
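/*
 * Illustrative sketch (not part of this file): with delayed allocation a
 * filesystem reserves quota at write time and converts the reservation to
 * real usage only when blocks are actually allocated. The helper name is
 * hypothetical.
 */
static int example_delalloc_bytes(struct inode *inode, qsize_t bytes)
{
	int ret;

	/* at ->write_begin time: reserve, nothing hits i_blocks yet */
	ret = __dquot_alloc_space(inode, bytes,
				  DQUOT_SPACE_WARN | DQUOT_SPACE_RESERVE);
	if (ret)
		return ret;
	/* at block allocation time: move rsvspace over to curspace */
	return dquot_claim_space_nodirty(inode, bytes);
}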
1665
1666/*
1667 * This operation can block, but only after everything is updated
1668 */
1669void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1670{
1671	unsigned int cnt;
1672	char warntype[MAXQUOTAS];
1673	int reserve = flags & DQUOT_SPACE_RESERVE;
1674
1675	/* First test before acquiring mutex - solves deadlocks when we
1676	 * re-enter the quota code and are already holding the mutex */
1677	if (!dquot_active(inode)) {
1678		inode_decr_space(inode, number, reserve);
1679		return;
1680	}
1681
1682	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1683	spin_lock(&dq_data_lock);
1684	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1685		if (!inode->i_dquot[cnt])
1686			continue;
1687		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1688		if (reserve)
1689			dquot_free_reserved_space(inode->i_dquot[cnt], number);
1690		else
1691			dquot_decr_space(inode->i_dquot[cnt], number);
1692	}
1693	inode_decr_space(inode, number, reserve);
1694	spin_unlock(&dq_data_lock);
1695
1696	if (reserve)
1697		goto out_unlock;
1698	mark_all_dquot_dirty(inode->i_dquot);
1699out_unlock:
1700	flush_warnings(inode->i_dquot, warntype);
1701	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1702}
1703EXPORT_SYMBOL(__dquot_free_space);
1704
1705/*
1706 * This operation can block, but only after everything is updated
1707 */
1708void dquot_free_inode(const struct inode *inode)
1709{
1710	unsigned int cnt;
1711	char warntype[MAXQUOTAS];
1712
1713	/* First test before acquiring mutex - solves deadlocks when we
1714	 * re-enter the quota code and are already holding the mutex */
1715	if (!dquot_active(inode))
1716		return;
1717
1718	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1719	spin_lock(&dq_data_lock);
1720	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1721		if (!inode->i_dquot[cnt])
1722			continue;
1723		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
1724		dquot_decr_inodes(inode->i_dquot[cnt], 1);
1725	}
1726	spin_unlock(&dq_data_lock);
1727	mark_all_dquot_dirty(inode->i_dquot);
1728	flush_warnings(inode->i_dquot, warntype);
1729	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1730}
1731EXPORT_SYMBOL(dquot_free_inode);
1732
1733/*
1734 * Transfer the number of inodes and blocks from one diskquota to another.
1735 * On success, dquot references in transfer_to are consumed and references
1736 * to original dquots that need to be released are placed there. On failure,
1737 * references are kept untouched.
1738 *
1739 * This operation can block, but only after everything is updated
1740 * A transaction must be started when entering this function.
1741 *
1742 */
1743int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1744{
1745	qsize_t space, cur_space;
1746	qsize_t rsv_space = 0;
1747	struct dquot *transfer_from[MAXQUOTAS] = {};
1748	int cnt, ret = 0;
1749	char is_valid[MAXQUOTAS] = {};
1750	char warntype_to[MAXQUOTAS];
1751	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1752
1753	/* First test before acquiring mutex - solves deadlocks when we
1754	 * re-enter the quota code and are already holding the mutex */
1755	if (IS_NOQUOTA(inode))
1756		return 0;
1757	/* Initialize the arrays */
1758	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1759		warntype_to[cnt] = QUOTA_NL_NOWARN;
1760	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1761	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
1762		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1763		return 0;
1764	}
1765	spin_lock(&dq_data_lock);
1766	cur_space = inode_get_bytes(inode);
1767	rsv_space = inode_get_rsv_space(inode);
1768	space = cur_space + rsv_space;
1769	/* Build the transfer_from list and check the limits */
1770	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1771		/*
1772		 * Skip changes for the same uid or gid, or for a turned-off quota type.
1773		 */
1774		if (!transfer_to[cnt])
1775			continue;
1776		/* Avoid races with quotaoff() */
1777		if (!sb_has_quota_active(inode->i_sb, cnt))
1778			continue;
1779		is_valid[cnt] = 1;
1780		transfer_from[cnt] = inode->i_dquot[cnt];
1781		ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
1782		if (ret)
1783			goto over_quota;
1784		ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
1785		if (ret)
1786			goto over_quota;
1787	}
1788
1789	/*
1790	 * Finally perform the needed transfer from transfer_from to transfer_to
1791	 */
1792	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1793		if (!is_valid[cnt])
1794			continue;
1795		/* Due to an IO error we might not have a transfer_from[] structure */
1796		if (transfer_from[cnt]) {
1797			warntype_from_inodes[cnt] =
1798				info_idq_free(transfer_from[cnt], 1);
1799			warntype_from_space[cnt] =
1800				info_bdq_free(transfer_from[cnt], space);
1801			dquot_decr_inodes(transfer_from[cnt], 1);
1802			dquot_decr_space(transfer_from[cnt], cur_space);
1803			dquot_free_reserved_space(transfer_from[cnt],
1804						  rsv_space);
1805		}
1806
1807		dquot_incr_inodes(transfer_to[cnt], 1);
1808		dquot_incr_space(transfer_to[cnt], cur_space);
1809		dquot_resv_space(transfer_to[cnt], rsv_space);
1810
1811		inode->i_dquot[cnt] = transfer_to[cnt];
1812	}
1813	spin_unlock(&dq_data_lock);
1814	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1815
1816	mark_all_dquot_dirty(transfer_from);
1817	mark_all_dquot_dirty(transfer_to);
1818	flush_warnings(transfer_to, warntype_to);
1819	flush_warnings(transfer_from, warntype_from_inodes);
1820	flush_warnings(transfer_from, warntype_from_space);
1821	/* Pass back references to put */
1822	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1823		if (is_valid[cnt])
1824			transfer_to[cnt] = transfer_from[cnt];
1825	return 0;
1826over_quota:
1827	spin_unlock(&dq_data_lock);
1828	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1829	flush_warnings(transfer_to, warntype_to);
1830	return ret;
1831}
1832EXPORT_SYMBOL(__dquot_transfer);
1833
1834/* Wrapper for transferring ownership of an inode for uid/gid only
1835 * Called from FSXXX_setattr()
1836 */
1837int dquot_transfer(struct inode *inode, struct iattr *iattr)
1838{
1839	struct dquot *transfer_to[MAXQUOTAS] = {};
1840	struct super_block *sb = inode->i_sb;
1841	int ret;
1842
1843	if (!dquot_active(inode))
1844		return 0;
1845
1846	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
1847		transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
1848	if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
1849		transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
1850
1851	ret = __dquot_transfer(inode, transfer_to);
1852	dqput_all(transfer_to);
1853	return ret;
1854}
1855EXPORT_SYMBOL(dquot_transfer);
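/*
 * Illustrative sketch (not part of this file): a typical ->setattr
 * implementation hands chown/chgrp off to dquot_transfer() before copying
 * the new attributes into the inode. "myfs_setattr" is hypothetical.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = inode_change_ok(inode, attr);

	if (error)
		return error;
	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		error = dquot_transfer(inode, attr);
		if (error)
			return error;
	}
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}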
1856
1857/*
1858 * Write info of quota file to disk
1859 */
1860int dquot_commit_info(struct super_block *sb, int type)
1861{
1862	int ret;
1863	struct quota_info *dqopt = sb_dqopt(sb);
1864
1865	mutex_lock(&dqopt->dqio_mutex);
1866	ret = dqopt->ops[type]->write_file_info(sb, type);
1867	mutex_unlock(&dqopt->dqio_mutex);
1868	return ret;
1869}
1870EXPORT_SYMBOL(dquot_commit_info);
1871
1872/*
1873 * Definitions of diskquota operations.
1874 */
1875const struct dquot_operations dquot_operations = {
1876	.write_dquot	= dquot_commit,
1877	.acquire_dquot	= dquot_acquire,
1878	.release_dquot	= dquot_release,
1879	.mark_dirty	= dquot_mark_dquot_dirty,
1880	.write_info	= dquot_commit_info,
1881	.alloc_dquot	= dquot_alloc,
1882	.destroy_dquot	= dquot_destroy,
1883};
1884EXPORT_SYMBOL(dquot_operations);
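/*
 * Illustrative sketch (not part of this file): a filesystem opts into the
 * generic implementation by pointing its superblock at this table during
 * mount; journalling filesystems instead supply their own dquot_operations
 * that wrap dquot_commit() and friends in transactions. The quotactl table
 * here refers to the hypothetical one sketched after dquot_quota_sync().
 */
static void example_enable_generic_quota(struct super_block *sb)
{
	sb->dq_op = &dquot_operations;		/* dquot callbacks */
	sb->s_qcop = &myfs_quotactl_ops;	/* quotactl entry points */
}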
1885
1886/*
1887 * Generic helper for ->open on filesystems supporting disk quotas.
1888 */
1889int dquot_file_open(struct inode *inode, struct file *file)
1890{
1891	int error;
1892
1893	error = generic_file_open(inode, file);
1894	if (!error && (file->f_mode & FMODE_WRITE))
1895		dquot_initialize(inode);
1896	return error;
1897}
1898EXPORT_SYMBOL(dquot_file_open);
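/*
 * Illustrative sketch (not part of this file): dquot_file_open() slots
 * straight into ->open, so any file opened for write gets its quota
 * pointers initialized. The "myfs" table and its other methods are
 * hypothetical.
 */
static const struct file_operations myfs_file_operations = {
	.llseek	= generic_file_llseek,
	.mmap	= generic_file_mmap,
	.open	= dquot_file_open,	/* generic_file_open + dquot_initialize */
};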
1899
1900/*
1901 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1902 */
1903int dquot_disable(struct super_block *sb, int type, unsigned int flags)
1904{
1905	int cnt, ret = 0;
1906	struct quota_info *dqopt = sb_dqopt(sb);
1907	struct inode *toputinode[MAXQUOTAS];
1908
1909	/* Cannot turn off usage accounting without also turning off limits, or
1910	 * suspend quotas and simultaneously turn them off. */
1911	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
1912	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
1913	    DQUOT_USAGE_ENABLED)))
1914		return -EINVAL;
1915
1916	/* We need to serialize quota_off() for device */
1917	mutex_lock(&dqopt->dqonoff_mutex);
1918
1919	/*
1920	 * Skip everything if there's nothing to do. We have to do this because
1921	 * sometimes we are called when fill_super() failed and calling
1922	 * sync_fs() in such cases does no good.
1923	 */
1924	if (!sb_any_quota_loaded(sb)) {
1925		mutex_unlock(&dqopt->dqonoff_mutex);
1926		return 0;
1927	}
1928	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1929		toputinode[cnt] = NULL;
1930		if (type != -1 && cnt != type)
1931			continue;
1932		if (!sb_has_quota_loaded(sb, cnt))
1933			continue;
1934
1935		if (flags & DQUOT_SUSPENDED) {
1936			spin_lock(&dq_state_lock);
1937			dqopt->flags |=
1938				dquot_state_flag(DQUOT_SUSPENDED, cnt);
1939			spin_unlock(&dq_state_lock);
1940		} else {
1941			spin_lock(&dq_state_lock);
1942			dqopt->flags &= ~dquot_state_flag(flags, cnt);
1943			/* Turning off suspended quotas? */
1944			if (!sb_has_quota_loaded(sb, cnt) &&
1945			    sb_has_quota_suspended(sb, cnt)) {
1946				dqopt->flags &=	~dquot_state_flag(
1947							DQUOT_SUSPENDED, cnt);
1948				spin_unlock(&dq_state_lock);
1949				iput(dqopt->files[cnt]);
1950				dqopt->files[cnt] = NULL;
1951				continue;
1952			}
1953			spin_unlock(&dq_state_lock);
1954		}
1955
1956		/* Do we still have to keep quota loaded? */
1957		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
1958			continue;
1959
1960		/* Note: these are blocking operations */
1961		drop_dquot_ref(sb, cnt);
1962		invalidate_dquots(sb, cnt);
1963		/*
1964		 * Now all dquots should be invalidated, all writes done so we
1965		 * should be only users of the info. No locks needed.
1966		 */
1967		if (info_dirty(&dqopt->info[cnt]))
1968			sb->dq_op->write_info(sb, cnt);
1969		if (dqopt->ops[cnt]->free_file_info)
1970			dqopt->ops[cnt]->free_file_info(sb, cnt);
1971		put_quota_format(dqopt->info[cnt].dqi_format);
1972
1973		toputinode[cnt] = dqopt->files[cnt];
1974		if (!sb_has_quota_loaded(sb, cnt))
1975			dqopt->files[cnt] = NULL;
1976		dqopt->info[cnt].dqi_flags = 0;
1977		dqopt->info[cnt].dqi_igrace = 0;
1978		dqopt->info[cnt].dqi_bgrace = 0;
1979		dqopt->ops[cnt] = NULL;
1980	}
1981	mutex_unlock(&dqopt->dqonoff_mutex);
1982
1983	/* Skip syncing and setting flags if quota files are hidden */
1984	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
1985		goto put_inodes;
1986
1987	/* Sync the superblock so that buffers with quota data are written to
1988	 * disk (and so userspace sees correct data afterwards). */
1989	if (sb->s_op->sync_fs)
1990		sb->s_op->sync_fs(sb, 1);
1991	sync_blockdev(sb->s_bdev);
1992	/* Now the quota files are just ordinary files and we can set the
1993	 * inode flags back. Moreover we discard the pagecache so that
1994	 * userspace sees the writes we did bypassing the pagecache. We
1995	 * must also discard the blockdev buffers so that we see the
1996	 * changes done by userspace on the next quotaon(). */
1997	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1998		if (toputinode[cnt]) {
1999			mutex_lock(&dqopt->dqonoff_mutex);
2000			/* If quota was reenabled in the meantime, we have
2001			 * nothing to do */
2002			if (!sb_has_quota_loaded(sb, cnt)) {
2003				mutex_lock_nested(&toputinode[cnt]->i_mutex,
2004						  I_MUTEX_QUOTA);
2005				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2006				  S_NOATIME | S_NOQUOTA);
2007				truncate_inode_pages(&toputinode[cnt]->i_data,
2008						     0);
2009				mutex_unlock(&toputinode[cnt]->i_mutex);
2010				mark_inode_dirty_sync(toputinode[cnt]);
2011			}
2012			mutex_unlock(&dqopt->dqonoff_mutex);
2013		}
2014	if (sb->s_bdev)
2015		invalidate_bdev(sb->s_bdev);
2016put_inodes:
2017	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2018		if (toputinode[cnt]) {
2019			/* On remount RO, we keep the inode pointer so that we
2020			 * can reenable quota on the subsequent remount RW. We
2021			 * have to check 'flags' variable and not use sb_has_
2022			 * function because another quotaon / quotaoff could
2023			 * change global state before we got here. We refuse
2024			 * to suspend quotas when there is pending delete on
2025			 * the quota file... */
2026			if (!(flags & DQUOT_SUSPENDED))
2027				iput(toputinode[cnt]);
2028			else if (!toputinode[cnt]->i_nlink)
2029				ret = -EBUSY;
2030		}
2031	return ret;
2032}
2033EXPORT_SYMBOL(dquot_disable);
2034
2035int dquot_quota_off(struct super_block *sb, int type)
2036{
2037	return dquot_disable(sb, type,
2038			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2039}
2040EXPORT_SYMBOL(dquot_quota_off);
2041
2042/*
2043	 * Turn quotas on for a device
2044 */
2045
2046/*
2047 * Helper function to turn quotas on when we already have the inode of
2048 * quota file and no quota information is loaded.
2049 */
2050static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2051	unsigned int flags)
2052{
2053	struct quota_format_type *fmt = find_quota_format(format_id);
2054	struct super_block *sb = inode->i_sb;
2055	struct quota_info *dqopt = sb_dqopt(sb);
2056	int error;
2057	int oldflags = -1;
2058
2059	if (!fmt)
2060		return -ESRCH;
2061	if (!S_ISREG(inode->i_mode)) {
2062		error = -EACCES;
2063		goto out_fmt;
2064	}
2065	if (IS_RDONLY(inode)) {
2066		error = -EROFS;
2067		goto out_fmt;
2068	}
2069	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
2070		error = -EINVAL;
2071		goto out_fmt;
2072	}
2073	/* Usage always has to be set... */
2074	if (!(flags & DQUOT_USAGE_ENABLED)) {
2075		error = -EINVAL;
2076		goto out_fmt;
2077	}
2078
2079	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2080		/* As we bypass the pagecache we must now flush all the
2081		 * dirty data and invalidate caches so that the kernel sees
2082		 * changes from userspace. It is not enough to just flush
2083		 * the quota file since, if blocksize < pagesize, invalidation
2084		 * of the cache could fail because of other unrelated dirty
2085		 * data. */
2086		sync_filesystem(sb);
2087		invalidate_bdev(sb->s_bdev);
2088	}
2089	mutex_lock(&dqopt->dqonoff_mutex);
2090	if (sb_has_quota_loaded(sb, type)) {
2091		error = -EBUSY;
2092		goto out_lock;
2093	}
2094
2095	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
2096		/* We don't want quota and atime on quota files (deadlocks
2097		 * possible). Also nobody should write to the file - we use
2098		 * special IO operations which ignore the immutable bit. */
2099		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2100		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2101					     S_NOQUOTA);
2102		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2103		mutex_unlock(&inode->i_mutex);
2104		/*
2105		 * When S_NOQUOTA is set, remove dquot references as no more
2106		 * references can be added
2107		 */
2108		__dquot_drop(inode);
2109	}
2110
2111	error = -EIO;
2112	dqopt->files[type] = igrab(inode);
2113	if (!dqopt->files[type])
2114		goto out_lock;
2115	error = -EINVAL;
2116	if (!fmt->qf_ops->check_quota_file(sb, type))
2117		goto out_file_init;
2118
2119	dqopt->ops[type] = fmt->qf_ops;
2120	dqopt->info[type].dqi_format = fmt;
2121	dqopt->info[type].dqi_fmt_id = format_id;
2122	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
2123	mutex_lock(&dqopt->dqio_mutex);
2124	error = dqopt->ops[type]->read_file_info(sb, type);
2125	if (error < 0) {
2126		mutex_unlock(&dqopt->dqio_mutex);
2127		goto out_file_init;
2128	}
2129	mutex_unlock(&dqopt->dqio_mutex);
2130	spin_lock(&dq_state_lock);
2131	dqopt->flags |= dquot_state_flag(flags, type);
2132	spin_unlock(&dq_state_lock);
2133
2134	add_dquot_ref(sb, type);
2135	mutex_unlock(&dqopt->dqonoff_mutex);
2136
2137	return 0;
2138
2139out_file_init:
2140	dqopt->files[type] = NULL;
2141	iput(inode);
2142out_lock:
2143	if (oldflags != -1) {
2144		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2145		/* Set the flags back (in the case of accidental quotaon()
2146		 * on a wrong file we don't want to mess up the flags) */
2147		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2148		inode->i_flags |= oldflags;
2149		mutex_unlock(&inode->i_mutex);
2150	}
2151	mutex_unlock(&dqopt->dqonoff_mutex);
2152out_fmt:
2153	put_quota_format(fmt);
2154
2155	return error;
2156}
2157
2158/* Reenable quotas on remount RW */
2159int dquot_resume(struct super_block *sb, int type)
2160{
2161	struct quota_info *dqopt = sb_dqopt(sb);
2162	struct inode *inode;
2163	int ret = 0, cnt;
2164	unsigned int flags;
2165
2166	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2167		if (type != -1 && cnt != type)
2168			continue;
2169
2170		mutex_lock(&dqopt->dqonoff_mutex);
2171		if (!sb_has_quota_suspended(sb, cnt)) {
2172			mutex_unlock(&dqopt->dqonoff_mutex);
2173			continue;
2174		}
2175		inode = dqopt->files[cnt];
2176		dqopt->files[cnt] = NULL;
2177		spin_lock(&dq_state_lock);
2178		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2179							DQUOT_LIMITS_ENABLED,
2180							cnt);
2181		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2182		spin_unlock(&dq_state_lock);
2183		mutex_unlock(&dqopt->dqonoff_mutex);
2184
2185		flags = dquot_generic_flag(flags, cnt);
2186		ret = vfs_load_quota_inode(inode, cnt,
2187				dqopt->info[cnt].dqi_fmt_id, flags);
2188		iput(inode);
2189	}
2190
2191	return ret;
2192}
2193EXPORT_SYMBOL(dquot_resume);
2194
2195int dquot_quota_on(struct super_block *sb, int type, int format_id,
2196		   struct path *path)
2197{
2198	int error = security_quota_on(path->dentry);
2199	if (error)
2200		return error;
2201	/* Quota file not on the same filesystem? */
2202	if (path->mnt->mnt_sb != sb)
2203		error = -EXDEV;
2204	else
2205		error = vfs_load_quota_inode(path->dentry->d_inode, type,
2206					     format_id, DQUOT_USAGE_ENABLED |
2207					     DQUOT_LIMITS_ENABLED);
2208	return error;
2209}
2210EXPORT_SYMBOL(dquot_quota_on);
2211
2212/*
2213 * More powerful function for turning on quotas allowing setting
2214 * of individual quota flags
2215 */
2216int dquot_enable(struct inode *inode, int type, int format_id,
2217		 unsigned int flags)
2218{
2219	int ret = 0;
2220	struct super_block *sb = inode->i_sb;
2221	struct quota_info *dqopt = sb_dqopt(sb);
2222
2223	/* Just unsuspend quotas? */
2224	BUG_ON(flags & DQUOT_SUSPENDED);
2225
2226	if (!flags)
2227		return 0;
2228	/* Just updating flags needed? */
2229	if (sb_has_quota_loaded(sb, type)) {
2230		mutex_lock(&dqopt->dqonoff_mutex);
2231		/* Now do a reliable test... */
2232		if (!sb_has_quota_loaded(sb, type)) {
2233			mutex_unlock(&dqopt->dqonoff_mutex);
2234			goto load_quota;
2235		}
2236		if (flags & DQUOT_USAGE_ENABLED &&
2237		    sb_has_quota_usage_enabled(sb, type)) {
2238			ret = -EBUSY;
2239			goto out_lock;
2240		}
2241		if (flags & DQUOT_LIMITS_ENABLED &&
2242		    sb_has_quota_limits_enabled(sb, type)) {
2243			ret = -EBUSY;
2244			goto out_lock;
2245		}
2246		spin_lock(&dq_state_lock);
2247		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
2248		spin_unlock(&dq_state_lock);
2249out_lock:
2250		mutex_unlock(&dqopt->dqonoff_mutex);
2251		return ret;
2252	}
2253
2254load_quota:
2255	return vfs_load_quota_inode(inode, type, format_id, flags);
2256}
2257EXPORT_SYMBOL(dquot_enable);
2258
2259/*
2260 * This function is used when a filesystem needs to initialize quotas
2261 * at mount time.
2262 */
2263int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2264		int format_id, int type)
2265{
2266	struct dentry *dentry;
2267	int error;
2268
2269	mutex_lock(&sb->s_root->d_inode->i_mutex);
2270	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
2271	mutex_unlock(&sb->s_root->d_inode->i_mutex);
2272	if (IS_ERR(dentry))
2273		return PTR_ERR(dentry);
2274
2275	if (!dentry->d_inode) {
2276		error = -ENOENT;
2277		goto out;
2278	}
2279
2280	error = security_quota_on(dentry);
2281	if (!error)
2282		error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
2283				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2284
2285out:
2286	dput(dentry);
2287	return error;
2288}
2289EXPORT_SYMBOL(dquot_quota_on_mount);
2290
2291static inline qsize_t qbtos(qsize_t blocks)
2292{
2293	return blocks << QIF_DQBLKSIZE_BITS;
2294}
2295
2296static inline qsize_t stoqb(qsize_t space)
2297{
2298	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
2299}
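/*
 * A quick worked example for the two helpers above, assuming the usual
 * QIF_DQBLKSIZE_BITS of 10 (i.e. 1 KiB quota blocks):
 *
 *	qbtos(3)    == 3 << 10 == 3072 bytes
 *	stoqb(3072) == 3 quota blocks
 *	stoqb(3073) == 4 quota blocks (space is rounded up)
 */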
2300
2301/* Generic routine for getting common part of quota structure */
2302static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2303{
2304	struct mem_dqblk *dm = &dquot->dq_dqb;
2305
2306	memset(di, 0, sizeof(*di));
2307	di->d_version = FS_DQUOT_VERSION;
2308	di->d_flags = dquot->dq_type == USRQUOTA ?
2309			FS_USER_QUOTA : FS_GROUP_QUOTA;
2310	di->d_id = dquot->dq_id;
2311
2312	spin_lock(&dq_data_lock);
2313	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
2314	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
2315	di->d_ino_hardlimit = dm->dqb_ihardlimit;
2316	di->d_ino_softlimit = dm->dqb_isoftlimit;
2317	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
2318	di->d_icount = dm->dqb_curinodes;
2319	di->d_btimer = dm->dqb_btime;
2320	di->d_itimer = dm->dqb_itime;
2321	spin_unlock(&dq_data_lock);
2322}
2323
2324int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
2325		    struct fs_disk_quota *di)
2326{
2327	struct dquot *dquot;
2328
2329	dquot = dqget(sb, id, type);
2330	if (!dquot)
2331		return -ESRCH;
2332	do_get_dqblk(dquot, di);
2333	dqput(dquot);
2334
2335	return 0;
2336}
2337EXPORT_SYMBOL(dquot_get_dqblk);
2338
2339#define VFS_FS_DQ_MASK \
2340	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
2341	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
2342	 FS_DQ_BTIMER | FS_DQ_ITIMER)
2343
2344/* Generic routine for setting common part of quota structure */
2345static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2346{
2347	struct mem_dqblk *dm = &dquot->dq_dqb;
2348	int check_blim = 0, check_ilim = 0;
2349	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
2350
2351	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
2352		return -EINVAL;
2353
2354	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
2355	     (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
2356	    ((di->d_fieldmask & FS_DQ_BHARD) &&
2357	     (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
2358	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
2359	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
2360	    ((di->d_fieldmask & FS_DQ_IHARD) &&
2361	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
2362		return -ERANGE;
2363
2364	spin_lock(&dq_data_lock);
2365	if (di->d_fieldmask & FS_DQ_BCOUNT) {
2366		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
2367		check_blim = 1;
2368		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2369	}
2370
2371	if (di->d_fieldmask & FS_DQ_BSOFT)
2372		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
2373	if (di->d_fieldmask & FS_DQ_BHARD)
2374		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
2375	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
2376		check_blim = 1;
2377		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2378	}
2379
2380	if (di->d_fieldmask & FS_DQ_ICOUNT) {
2381		dm->dqb_curinodes = di->d_icount;
2382		check_ilim = 1;
2383		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2384	}
2385
2386	if (di->d_fieldmask & FS_DQ_ISOFT)
2387		dm->dqb_isoftlimit = di->d_ino_softlimit;
2388	if (di->d_fieldmask & FS_DQ_IHARD)
2389		dm->dqb_ihardlimit = di->d_ino_hardlimit;
2390	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
2391		check_ilim = 1;
2392		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2393	}
2394
2395	if (di->d_fieldmask & FS_DQ_BTIMER) {
2396		dm->dqb_btime = di->d_btimer;
2397		check_blim = 1;
2398		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2399	}
2400
2401	if (di->d_fieldmask & FS_DQ_ITIMER) {
2402		dm->dqb_itime = di->d_itimer;
2403		check_ilim = 1;
2404		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2405	}
2406
2407	if (check_blim) {
2408		if (!dm->dqb_bsoftlimit ||
2409		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
2410			dm->dqb_btime = 0;
2411			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2412		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
2413			/* Set grace only if the caller hasn't supplied one... */
2414			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2415	}
2416	if (check_ilim) {
2417		if (!dm->dqb_isoftlimit ||
2418		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
2419			dm->dqb_itime = 0;
2420			clear_bit(DQ_INODES_B, &dquot->dq_flags);
2421		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
2422			/* Set grace only if the caller hasn't supplied one... */
2423			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2424	}
2425	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2426	    dm->dqb_isoftlimit)
2427		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2428	else
2429		set_bit(DQ_FAKE_B, &dquot->dq_flags);
2430	spin_unlock(&dq_data_lock);
2431	mark_dquot_dirty(dquot);
2432
2433	return 0;
2434}
2435
2436int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
2437		  struct fs_disk_quota *di)
2438{
2439	struct dquot *dquot;
2440	int rc;
2441
2442	dquot = dqget(sb, id, type);
2443	if (!dquot) {
2444		rc = -ESRCH;
2445		goto out;
2446	}
2447	rc = do_set_dqblk(dquot, di);
2448	dqput(dquot);
2449out:
2450	return rc;
2451}
2452EXPORT_SYMBOL(dquot_set_dqblk);
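/*
 * Editorial example (not part of the original source): a sketch of
 * setting a 1 GiB soft / 2 GiB hard block limit for group 100.  At this
 * layer the limits are expressed in 1 KiB quota blocks (converted by
 * qbtos() above); the function name is illustrative.
 */
#if 0	/* illustration only, not compiled */
static int example_set_group_limits(struct super_block *sb)
{
	struct fs_disk_quota di = {
		.d_version	 = FS_DQUOT_VERSION,
		.d_fieldmask	 = FS_DQ_BSOFT | FS_DQ_BHARD,
		.d_blk_softlimit = 1024 * 1024,		/* 1 GiB */
		.d_blk_hardlimit = 2 * 1024 * 1024,	/* 2 GiB */
	};

	return dquot_set_dqblk(sb, GRPQUOTA, 100, &di);
}
#endif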
2453
2454/* Generic routine for getting common part of quota file information */
2455int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2456{
2457	struct mem_dqinfo *mi;
2458
2459	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2460	if (!sb_has_quota_active(sb, type)) {
2461		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2462		return -ESRCH;
2463	}
2464	mi = sb_dqopt(sb)->info + type;
2465	spin_lock(&dq_data_lock);
2466	ii->dqi_bgrace = mi->dqi_bgrace;
2467	ii->dqi_igrace = mi->dqi_igrace;
2468	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
2469	ii->dqi_valid = IIF_ALL;
2470	spin_unlock(&dq_data_lock);
2471	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2472	return 0;
2473}
2474EXPORT_SYMBOL(dquot_get_dqinfo);
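/*
 * Editorial example (not part of the original source): a sketch of
 * reading the per-type grace periods; this is the path behind the
 * Q_GETINFO quotactl.
 */
#if 0	/* illustration only, not compiled */
static void example_show_grace(struct super_block *sb)
{
	struct if_dqinfo ii;

	if (dquot_get_dqinfo(sb, USRQUOTA, &ii) == 0)
		pr_info("block grace %llu s, inode grace %llu s\n",
			(unsigned long long)ii.dqi_bgrace,
			(unsigned long long)ii.dqi_igrace);
}
#endif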
2475
2476/* Generic routine for setting common part of quota file information */
2477int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2478{
2479	struct mem_dqinfo *mi;
2480	int err = 0;
2481
2482	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2483	if (!sb_has_quota_active(sb, type)) {
2484		err = -ESRCH;
2485		goto out;
2486	}
2487	mi = sb_dqopt(sb)->info + type;
2488	spin_lock(&dq_data_lock);
2489	if (ii->dqi_valid & IIF_BGRACE)
2490		mi->dqi_bgrace = ii->dqi_bgrace;
2491	if (ii->dqi_valid & IIF_IGRACE)
2492		mi->dqi_igrace = ii->dqi_igrace;
2493	if (ii->dqi_valid & IIF_FLAGS)
2494		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
2495				(ii->dqi_flags & DQF_MASK);
2496	spin_unlock(&dq_data_lock);
2497	mark_info_dirty(sb, type);
2498	/* Force write to disk */
2499	sb->dq_op->write_info(sb, type);
2500out:
2501	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2502	return err;
2503}
2504EXPORT_SYMBOL(dquot_set_dqinfo);
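/*
 * Editorial example (not part of the original source): dqi_valid selects
 * which fields dquot_set_dqinfo() applies, so a caller can update the
 * block grace period alone.  A sketch setting a one-week grace:
 */
#if 0	/* illustration only, not compiled */
static int example_set_block_grace(struct super_block *sb)
{
	struct if_dqinfo ii = {
		.dqi_valid  = IIF_BGRACE,
		.dqi_bgrace = 7 * 24 * 60 * 60,	/* one week, in seconds */
	};

	return dquot_set_dqinfo(sb, USRQUOTA, &ii);
}
#endif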
2505
2506const struct quotactl_ops dquot_quotactl_ops = {
2507	.quota_on	= dquot_quota_on,
2508	.quota_off	= dquot_quota_off,
2509	.quota_sync	= dquot_quota_sync,
2510	.get_info	= dquot_get_dqinfo,
2511	.set_info	= dquot_set_dqinfo,
2512	.get_dqblk	= dquot_get_dqblk,
2513	.set_dqblk	= dquot_set_dqblk
2514};
2515EXPORT_SYMBOL(dquot_quotactl_ops);
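/*
 * Editorial example (not part of the original source): a filesystem that
 * relies entirely on the generic dquot code can wire this table (and
 * dquot_operations, defined earlier in this file) into its super block
 * during mount:
 */
#if 0	/* illustration only, not compiled */
	sb->s_qcop = &dquot_quotactl_ops;
	sb->dq_op  = &dquot_operations;
#endif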
2516
2517static int do_proc_dqstats(struct ctl_table *table, int write,
2518		     void __user *buffer, size_t *lenp, loff_t *ppos)
2519{
2520	unsigned int type = (int *)table->data - dqstats.stat;
2521
2522	/* Update global table */
2523	dqstats.stat[type] =
2524			percpu_counter_sum_positive(&dqstats.counter[type]);
2525	return proc_dointvec(table, write, buffer, lenp, ppos);
2526}
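/*
 * Editorial note (not part of the original source): the pointer
 * arithmetic above recovers the counter index from the ctl_table entry:
 * .data points at &dqstats.stat[type], so subtracting the array base
 * yields type.  For the "writes" entry, for instance, table->data is
 * &dqstats.stat[DQST_WRITES], so a read of /proc/sys/fs/quota/writes
 * folds the per-CPU counter into dqstats.stat[DQST_WRITES] before
 * proc_dointvec() formats it.
 */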
2527
2528static ctl_table fs_dqstats_table[] = {
2529	{
2530		.procname	= "lookups",
2531		.data		= &dqstats.stat[DQST_LOOKUPS],
2532		.maxlen		= sizeof(int),
2533		.mode		= 0444,
2534		.proc_handler	= do_proc_dqstats,
2535	},
2536	{
2537		.procname	= "drops",
2538		.data		= &dqstats.stat[DQST_DROPS],
2539		.maxlen		= sizeof(int),
2540		.mode		= 0444,
2541		.proc_handler	= do_proc_dqstats,
2542	},
2543	{
2544		.procname	= "reads",
2545		.data		= &dqstats.stat[DQST_READS],
2546		.maxlen		= sizeof(int),
2547		.mode		= 0444,
2548		.proc_handler	= do_proc_dqstats,
2549	},
2550	{
2551		.procname	= "writes",
2552		.data		= &dqstats.stat[DQST_WRITES],
2553		.maxlen		= sizeof(int),
2554		.mode		= 0444,
2555		.proc_handler	= do_proc_dqstats,
2556	},
2557	{
2558		.procname	= "cache_hits",
2559		.data		= &dqstats.stat[DQST_CACHE_HITS],
2560		.maxlen		= sizeof(int),
2561		.mode		= 0444,
2562		.proc_handler	= do_proc_dqstats,
2563	},
2564	{
2565		.procname	= "allocated_dquots",
2566		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
2567		.maxlen		= sizeof(int),
2568		.mode		= 0444,
2569		.proc_handler	= do_proc_dqstats,
2570	},
2571	{
2572		.procname	= "free_dquots",
2573		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
2574		.maxlen		= sizeof(int),
2575		.mode		= 0444,
2576		.proc_handler	= do_proc_dqstats,
2577	},
2578	{
2579		.procname	= "syncs",
2580		.data		= &dqstats.stat[DQST_SYNCS],
2581		.maxlen		= sizeof(int),
2582		.mode		= 0444,
2583		.proc_handler	= do_proc_dqstats,
2584	},
2585#ifdef CONFIG_PRINT_QUOTA_WARNING
2586	{
2587		.procname	= "warnings",
2588		.data		= &flag_print_warnings,
2589		.maxlen		= sizeof(int),
2590		.mode		= 0644,
2591		.proc_handler	= proc_dointvec,
2592	},
2593#endif
2594	{ },
2595};
2596
2597static ctl_table fs_table[] = {
2598	{
2599		.procname	= "quota",
2600		.mode		= 0555,
2601		.child		= fs_dqstats_table,
2602	},
2603	{ },
2604};
2605
2606static ctl_table sys_table[] = {
2607	{
2608		.procname	= "fs",
2609		.mode		= 0555,
2610		.child		= fs_table,
2611	},
2612	{ },
2613};
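/*
 * Editorial note (not part of the original source): the three nested
 * tables chain together as sys_table ("fs") -> fs_table ("quota") ->
 * fs_dqstats_table, so registering sys_table publishes
 * /proc/sys/fs/quota/{lookups,drops,reads,writes,cache_hits,
 * allocated_dquots,free_dquots,syncs}, plus "warnings" when
 * CONFIG_PRINT_QUOTA_WARNING is enabled.
 */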
2614
2615static int __init dquot_init(void)
2616{
2617	int i, ret;
2618	unsigned long nr_hash, order;
2619
2620	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2621
2622	register_sysctl_table(sys_table);
2623
2624	dquot_cachep = kmem_cache_create("dquot",
2625			sizeof(struct dquot), sizeof(unsigned long) * 4,
2626			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2627				SLAB_MEM_SPREAD|SLAB_PANIC),
2628			NULL);
2629
2630	order = 0;
2631	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2632	if (!dquot_hash)
2633		panic("Cannot create dquot hash table");
2634
2635	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2636		ret = percpu_counter_init(&dqstats.counter[i], 0);
2637		if (ret)
2638			panic("Cannot create dquot stat counters");
2639	}
2640
2641	/* Find power-of-two hlist_heads which can fit into allocation */
2642	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2643	dq_hash_bits = 0;
2644	do {
2645		dq_hash_bits++;
2646	} while (nr_hash >> dq_hash_bits);
2647	dq_hash_bits--;
2648
2649	nr_hash = 1UL << dq_hash_bits;
2650	dq_hash_mask = nr_hash - 1;
2651	for (i = 0; i < nr_hash; i++)
2652		INIT_HLIST_HEAD(dquot_hash + i);
2653
2654	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2655			nr_hash, order, (PAGE_SIZE << order));
2656
2657	register_shrinker(&dqcache_shrinker);
2658
2659	return 0;
2660}
2661module_init(dquot_init);
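/*
 * Editorial note (not part of the original source): a worked example of
 * the hash sizing above, assuming PAGE_SIZE == 4096 and
 * sizeof(struct hlist_head) == 8 (typical on 64-bit).  With order 0,
 * nr_hash = 4096 / 8 = 512 heads; the loop exits with dq_hash_bits == 10
 * (512 >> 10 == 0) and the decrement leaves 9, so the table ends up with
 * 1 << 9 == 512 buckets and dq_hash_mask == 511.
 */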