/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);


STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

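/*
 * Attach one dquot of the given type to the inode. If the dquot pointer
 * in the inode is already set we are done; otherwise look the dquot up
 * (allocating it on disk if @doalloc says so) and store it in *IO_idqpp.
 */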
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

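/*
 * Decide whether xfs_qm_dqattach{,_locked} actually has any work to do:
 * quotas must be running and on, the inode must not already have all of
 * its dquots attached, and the quota inodes themselves are exempt.
 */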
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

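/*
 * Lock the inode and attach its dquots. This is the unlocked-entry
 * wrapper around xfs_qm_dqattach_locked().
 */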
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

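/*
 * State passed to the dquot LRU isolation callback: dirty dquot buffers
 * queued for delayed write, and clean dquots ready to be freed.
 */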
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

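/*
 * LRU walk callback: decide the fate of one dquot on the free list.
 * Re-referenced dquots are removed from the LRU, dirty ones are flushed
 * and skipped for this pass, and clean unreferenced ones are moved to
 * the dispose list for freeing by the caller.
 */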
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

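/*
 * Shrinker scan callback: walk the dquot LRU, write back any dirty
 * dquots we isolated, then destroy everything on the dispose list.
 */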
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

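/*
 * Shrinker count callback: report how many dquots sit on the LRU.
 */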
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

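/*
 * Read the default limits for one quota type from the id-zero dquot
 * and cache them in the quotainfo structure.
 */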
STATIC void
xfs_qm_set_defquota(
	xfs_mount_t	*mp,
	uint		type,
	xfs_quotainfo_t	*qinf)
{
	xfs_dquot_t		*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		defq = xfs_get_defquota(dqp, qinf);

		/*
		 * Timers and warnings have already been set, let's just set
		 * the default limits for this quota type.
		 */
		defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
		xfs_qm_dqdestroy(dqp);
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 *
	 * Timers and warnings are globally set by the first timer found in
	 * user/group/proj quota types, otherwise a default value is used.
	 * This should be split into different fields per quota type.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);

	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
				       &committed);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}


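/*
 * Reset the counters, timers and warnings of every dquot in a buffer
 * full of on-disk dquots, repairing any damaged entries and recomputing
 * CRCs as we go. Quotacheck will rebuild the counts from scratch.
 */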
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_flags = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

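/*
 * Read each dquot buffer in the given block range, reset the dquots it
 * contains and queue the buffer for delayed write.
 */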
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

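/*
 * Count the realtime blocks allocated to an inode by walking the data
 * fork extent list.
 */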
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

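/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write. Used by quotacheck to push the rebuilt counts to disk.
 */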
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return error;
}

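/*
 * Remove a dquot from the radix tree and free it; the final teardown
 * step for dquots reclaimed by the shrinker.
 */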
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}

	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}

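/*
 * Make sure every inode involved in a rename has its dquots attached,
 * skipping duplicate entries in the table.
 */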
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

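/*
 * Attach the dquots reserved for a newly created inode and account the
 * new inode against them in the given transaction.
 */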
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_trans.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_alloc.h"
26#include "xfs_quota.h"
27#include "xfs_mount.h"
28#include "xfs_bmap_btree.h"
29#include "xfs_ialloc_btree.h"
30#include "xfs_dinode.h"
31#include "xfs_inode.h"
32#include "xfs_ialloc.h"
33#include "xfs_itable.h"
34#include "xfs_rtalloc.h"
35#include "xfs_error.h"
36#include "xfs_bmap.h"
37#include "xfs_attr.h"
38#include "xfs_buf_item.h"
39#include "xfs_trans_space.h"
40#include "xfs_utils.h"
41#include "xfs_qm.h"
42#include "xfs_trace.h"
43
44/*
45 * The global quota manager. There is only one of these for the entire
46 * system, _not_ one per file system. XQM keeps track of the overall
47 * quota functionality, including maintaining the freelist and hash
48 * tables of dquots.
49 */
50STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
51STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
52STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *);
53
54/*
55 * We use the batch lookup interface to iterate over the dquots as it
56 * currently is the only interface into the radix tree code that allows
57 * fuzzy lookups instead of exact matches. Holding the lock over multiple
58 * operations is fine as all callers are used either during mount/umount
59 * or quotaoff.
60 */
61#define XFS_DQ_LOOKUP_BATCH 32
62
63STATIC int
64xfs_qm_dquot_walk(
65 struct xfs_mount *mp,
66 int type,
67 int (*execute)(struct xfs_dquot *dqp, void *data),
68 void *data)
69{
70 struct xfs_quotainfo *qi = mp->m_quotainfo;
71 struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type);
72 uint32_t next_index;
73 int last_error = 0;
74 int skipped;
75 int nr_found;
76
77restart:
78 skipped = 0;
79 next_index = 0;
80 nr_found = 0;
81
82 while (1) {
83 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
84 int error = 0;
85 int i;
86
87 mutex_lock(&qi->qi_tree_lock);
88 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
89 next_index, XFS_DQ_LOOKUP_BATCH);
90 if (!nr_found) {
91 mutex_unlock(&qi->qi_tree_lock);
92 break;
93 }
94
95 for (i = 0; i < nr_found; i++) {
96 struct xfs_dquot *dqp = batch[i];
97
98 next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
99
100 error = execute(batch[i], data);
101 if (error == EAGAIN) {
102 skipped++;
103 continue;
104 }
105 if (error && last_error != EFSCORRUPTED)
106 last_error = error;
107 }
108
109 mutex_unlock(&qi->qi_tree_lock);
110
111 /* bail out if the filesystem is corrupted. */
112 if (last_error == EFSCORRUPTED) {
113 skipped = 0;
114 break;
115 }
116 }
117
118 if (skipped) {
119 delay(1);
120 goto restart;
121 }
122
123 return last_error;
124}
125
126
127/*
128 * Purge a dquot from all tracking data structures and free it.
129 */
130STATIC int
131xfs_qm_dqpurge(
132 struct xfs_dquot *dqp,
133 void *data)
134{
135 struct xfs_mount *mp = dqp->q_mount;
136 struct xfs_quotainfo *qi = mp->m_quotainfo;
137 struct xfs_dquot *gdqp = NULL;
138
139 xfs_dqlock(dqp);
140 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
141 xfs_dqunlock(dqp);
142 return EAGAIN;
143 }
144
145 /*
146 * If this quota has a group hint attached, prepare for releasing it
147 * now.
148 */
149 gdqp = dqp->q_gdquot;
150 if (gdqp) {
151 xfs_dqlock(gdqp);
152 dqp->q_gdquot = NULL;
153 }
154
155 dqp->dq_flags |= XFS_DQ_FREEING;
156
157 xfs_dqflock(dqp);
158
159 /*
	 * If we are turning off this type of quota, we don't care
161 * about the dirty metadata sitting in this dquot. OTOH, if
162 * we're unmounting, we do care, so we flush it and wait.
163 */
164 if (XFS_DQ_IS_DIRTY(dqp)) {
165 struct xfs_buf *bp = NULL;
166 int error;
167
168 /*
169 * We don't care about getting disk errors here. We need
170 * to purge this dquot anyway, so we go ahead regardless.
171 */
172 error = xfs_qm_dqflush(dqp, &bp);
173 if (error) {
174 xfs_warn(mp, "%s: dquot %p flush failed",
175 __func__, dqp);
176 } else {
177 error = xfs_bwrite(bp);
178 xfs_buf_relse(bp);
179 }
180 xfs_dqflock(dqp);
181 }
182
183 ASSERT(atomic_read(&dqp->q_pincount) == 0);
184 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
185 !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
186
187 xfs_dqfunlock(dqp);
188 xfs_dqunlock(dqp);
189
190 radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
191 be32_to_cpu(dqp->q_core.d_id));
192 qi->qi_dquots--;
193
194 /*
195 * We move dquots to the freelist as soon as their reference count
196 * hits zero, so it really should be on the freelist here.
197 */
198 mutex_lock(&qi->qi_lru_lock);
199 ASSERT(!list_empty(&dqp->q_lru));
200 list_del_init(&dqp->q_lru);
201 qi->qi_lru_count--;
202 XFS_STATS_DEC(xs_qm_dquot_unused);
203 mutex_unlock(&qi->qi_lru_lock);
204
205 xfs_qm_dqdestroy(dqp);
206
207 if (gdqp)
208 xfs_qm_dqput(gdqp);
209 return 0;
210}
211
212/*
213 * Purge the dquot cache.
214 */
215void
216xfs_qm_dqpurge_all(
217 struct xfs_mount *mp,
218 uint flags)
219{
220 if (flags & XFS_QMOPT_UQUOTA)
221 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
222 if (flags & XFS_QMOPT_GQUOTA)
223 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
224 if (flags & XFS_QMOPT_PQUOTA)
225 xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
226}
227
228/*
229 * Just destroy the quotainfo structure.
230 */
231void
232xfs_qm_unmount(
233 struct xfs_mount *mp)
234{
235 if (mp->m_quotainfo) {
236 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
237 xfs_qm_destroy_quotainfo(mp);
238 }
239}
240
241
242/*
243 * This is called from xfs_mountfs to start quotas and initialize all
244 * necessary data structures like quotainfo. This is also responsible for
245 * running a quotacheck as necessary. We are guaranteed that the superblock
246 * is consistently read in at this point.
247 *
248 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
250 */
251void
252xfs_qm_mount_quotas(
253 xfs_mount_t *mp)
254{
255 int error = 0;
256 uint sbf;
257
258 /*
	 * If quotas for realtime volumes are not supported, we disable
	 * quotas immediately.
261 */
262 if (mp->m_sb.sb_rextents) {
263 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
264 mp->m_qflags = 0;
265 goto write_changes;
266 }
267
268 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
269
270 /*
271 * Allocate the quotainfo structure inside the mount struct, and
272 * create quotainode(s), and change/rev superblock if necessary.
273 */
274 error = xfs_qm_init_quotainfo(mp);
275 if (error) {
276 /*
277 * We must turn off quotas.
278 */
279 ASSERT(mp->m_quotainfo == NULL);
280 mp->m_qflags = 0;
281 goto write_changes;
282 }
283 /*
284 * If any of the quotas are not consistent, do a quotacheck.
285 */
286 if (XFS_QM_NEED_QUOTACHECK(mp)) {
287 error = xfs_qm_quotacheck(mp);
288 if (error) {
289 /* Quotacheck failed and disabled quotas. */
290 return;
291 }
292 }
293 /*
294 * If one type of quotas is off, then it will lose its
295 * quotachecked status, since we won't be doing accounting for
296 * that type anymore.
297 */
298 if (!XFS_IS_UQUOTA_ON(mp))
299 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
300 if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
301 mp->m_qflags &= ~XFS_OQUOTA_CHKD;
302
303 write_changes:
304 /*
305 * We actually don't have to acquire the m_sb_lock at all.
306 * This can only be called from mount, and that's single threaded. XXX
307 */
308 spin_lock(&mp->m_sb_lock);
309 sbf = mp->m_sb.sb_qflags;
310 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
311 spin_unlock(&mp->m_sb_lock);
312
313 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
314 if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
315 /*
316 * We could only have been turning quotas off.
317 * We aren't in very good shape actually because
318 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
320 */
321 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
322 xfs_alert(mp, "%s: Superblock update failed!",
323 __func__);
324 }
325 }
326
327 if (error) {
328 xfs_warn(mp, "Failed to initialize disk quotas.");
329 return;
330 }
331}
332
333/*
334 * Called from the vfsops layer.
335 */
336void
337xfs_qm_unmount_quotas(
338 xfs_mount_t *mp)
339{
340 /*
341 * Release the dquots that root inode, et al might be holding,
342 * before we flush quotas and blow away the quotainfo structure.
343 */
344 ASSERT(mp->m_rootip);
345 xfs_qm_dqdetach(mp->m_rootip);
346 if (mp->m_rbmip)
347 xfs_qm_dqdetach(mp->m_rbmip);
348 if (mp->m_rsumip)
349 xfs_qm_dqdetach(mp->m_rsumip);
350
351 /*
352 * Release the quota inodes.
353 */
354 if (mp->m_quotainfo) {
355 if (mp->m_quotainfo->qi_uquotaip) {
356 IRELE(mp->m_quotainfo->qi_uquotaip);
357 mp->m_quotainfo->qi_uquotaip = NULL;
358 }
359 if (mp->m_quotainfo->qi_gquotaip) {
360 IRELE(mp->m_quotainfo->qi_gquotaip);
361 mp->m_quotainfo->qi_gquotaip = NULL;
362 }
363 }
364}
365
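/*
 * Attach one dquot of the given type and id to the inode via *IO_idqpp,
 * taking a reference to it. The udqhint, if supplied, lets us copy the
 * group/project dquot cached in the user dquot instead of doing a full
 * xfs_qm_dqget() lookup. Called with the inode ilocked exclusively;
 * returns with the new dquot referenced but unlocked.
 */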
366STATIC int
367xfs_qm_dqattach_one(
368 xfs_inode_t *ip,
369 xfs_dqid_t id,
370 uint type,
371 uint doalloc,
372 xfs_dquot_t *udqhint, /* hint */
373 xfs_dquot_t **IO_idqpp)
374{
375 xfs_dquot_t *dqp;
376 int error;
377
378 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
379 error = 0;
380
381 /*
382 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This makes the code look a little
	 * weird, but it keeps the logic simple.
385 */
386 dqp = *IO_idqpp;
387 if (dqp) {
388 trace_xfs_dqattach_found(dqp);
389 return 0;
390 }
391
392 /*
393 * udqhint is the i_udquot field in inode, and is non-NULL only
394 * when the type arg is group/project. Its purpose is to save a
395 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
396 * the user dquot.
397 */
398 if (udqhint) {
399 ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
400 xfs_dqlock(udqhint);
401
402 /*
403 * No need to take dqlock to look at the id.
404 *
405 * The ID can't change until it gets reclaimed, and it won't
406 * be reclaimed as long as we have a ref from inode and we
407 * hold the ilock.
408 */
409 dqp = udqhint->q_gdquot;
410 if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
411 ASSERT(*IO_idqpp == NULL);
412
413 *IO_idqpp = xfs_qm_dqhold(dqp);
414 xfs_dqunlock(udqhint);
415 return 0;
416 }
417
418 /*
419 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time because of lock ordering: the
		 * inode lock comes before any dquot lock,
422 * and we may drop and reacquire the ilock in xfs_qm_dqget().
423 */
424 xfs_dqunlock(udqhint);
425 }
426
427 /*
428 * Find the dquot from somewhere. This bumps the
429 * reference count of dquot and returns it locked.
430 * This can return ENOENT if dquot didn't exist on
431 * disk and we didn't ask it to allocate;
432 * ESRCH if quotas got turned off suddenly.
433 */
434 error = xfs_qm_dqget(ip->i_mount, ip, id, type,
435 doalloc | XFS_QMOPT_DOWARN, &dqp);
436 if (error)
437 return error;
438
439 trace_xfs_dqattach_get(dqp);
440
441 /*
442 * dqget may have dropped and re-acquired the ilock, but it guarantees
443 * that the dquot returned is the one that should go in the inode.
444 */
445 *IO_idqpp = dqp;
446 xfs_dqunlock(dqp);
447 return 0;
448}
449
450
451/*
452 * Given a udquot and gdquot, attach a ptr to the group dquot in the
453 * udquot as a hint for future lookups.
454 */
455STATIC void
456xfs_qm_dqattach_grouphint(
457 xfs_dquot_t *udq,
458 xfs_dquot_t *gdq)
459{
460 xfs_dquot_t *tmp;
461
462 xfs_dqlock(udq);
463
464 tmp = udq->q_gdquot;
465 if (tmp) {
466 if (tmp == gdq)
467 goto done;
468
469 udq->q_gdquot = NULL;
470 xfs_qm_dqrele(tmp);
471 }
472
473 udq->q_gdquot = xfs_qm_dqhold(gdq);
474done:
475 xfs_dqunlock(udq);
476}
477
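/*
 * Check whether an inode actually needs its dquots attached: quotas must
 * be running and on, some applicable dquot must still be missing, and the
 * quota inodes themselves never get dquots attached.
 */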
478static bool
479xfs_qm_need_dqattach(
480 struct xfs_inode *ip)
481{
482 struct xfs_mount *mp = ip->i_mount;
483
484 if (!XFS_IS_QUOTA_RUNNING(mp))
485 return false;
486 if (!XFS_IS_QUOTA_ON(mp))
487 return false;
488 if (!XFS_NOT_DQATTACHED(mp, ip))
489 return false;
490 if (ip->i_ino == mp->m_sb.sb_uquotino ||
491 ip->i_ino == mp->m_sb.sb_gquotino)
492 return false;
493 return true;
494}
495
496/*
497 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
498 * into account.
499 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
500 * Inode may get unlocked and relocked in here, and the caller must deal with
501 * the consequences.
502 */
503int
504xfs_qm_dqattach_locked(
505 xfs_inode_t *ip,
506 uint flags)
507{
508 xfs_mount_t *mp = ip->i_mount;
509 uint nquotas = 0;
510 int error = 0;
511
512 if (!xfs_qm_need_dqattach(ip))
513 return 0;
514
515 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
516
517 if (XFS_IS_UQUOTA_ON(mp)) {
518 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
519 flags & XFS_QMOPT_DQALLOC,
520 NULL, &ip->i_udquot);
521 if (error)
522 goto done;
523 nquotas++;
524 }
525
526 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
527 if (XFS_IS_OQUOTA_ON(mp)) {
528 error = XFS_IS_GQUOTA_ON(mp) ?
529 xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
530 flags & XFS_QMOPT_DQALLOC,
531 ip->i_udquot, &ip->i_gdquot) :
532 xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
533 flags & XFS_QMOPT_DQALLOC,
534 ip->i_udquot, &ip->i_gdquot);
535 /*
536 * Don't worry about the udquot that we may have
537 * attached above. It'll get detached, if not already.
538 */
539 if (error)
540 goto done;
541 nquotas++;
542 }
543
544 /*
545 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in thrashing.
547 */
548 if (nquotas == 2) {
549 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
550 ASSERT(ip->i_udquot);
551 ASSERT(ip->i_gdquot);
552
553 /*
554 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on i_gdquot being 100%
		 * accurate all the time. It is just a hint, and this will
557 * succeed in general.
558 */
559 if (ip->i_udquot->q_gdquot != ip->i_gdquot)
560 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
561 }
562
563 done:
564#ifdef DEBUG
565 if (!error) {
566 if (XFS_IS_UQUOTA_ON(mp))
567 ASSERT(ip->i_udquot);
568 if (XFS_IS_OQUOTA_ON(mp))
569 ASSERT(ip->i_gdquot);
570 }
571 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
572#endif
573 return error;
574}
575
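/*
 * Convenience wrapper around xfs_qm_dqattach_locked() that takes and
 * drops the ilock for callers that don't already hold it.
 */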
576int
577xfs_qm_dqattach(
578 struct xfs_inode *ip,
579 uint flags)
580{
581 int error;
582
583 if (!xfs_qm_need_dqattach(ip))
584 return 0;
585
586 xfs_ilock(ip, XFS_ILOCK_EXCL);
587 error = xfs_qm_dqattach_locked(ip, flags);
588 xfs_iunlock(ip, XFS_ILOCK_EXCL);
589
590 return error;
591}
592
593/*
594 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
596 * xfs_ireclaim.
597 */
598void
599xfs_qm_dqdetach(
600 xfs_inode_t *ip)
601{
602 if (!(ip->i_udquot || ip->i_gdquot))
603 return;
604
605 trace_xfs_dquot_dqdetach(ip);
606
607 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
608 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
609 if (ip->i_udquot) {
610 xfs_qm_dqrele(ip->i_udquot);
611 ip->i_udquot = NULL;
612 }
613 if (ip->i_gdquot) {
614 xfs_qm_dqrele(ip->i_gdquot);
615 ip->i_gdquot = NULL;
616 }
617}
618
619/*
620 * This initializes all the quota information that's kept in the
621 * mount structure
622 */
623STATIC int
624xfs_qm_init_quotainfo(
625 xfs_mount_t *mp)
626{
627 xfs_quotainfo_t *qinf;
628 int error;
629 xfs_dquot_t *dqp;
630
631 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
632
633 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
634
635 /*
636 * See if quotainodes are setup, and if not, allocate them,
637 * and change the superblock accordingly.
638 */
639 if ((error = xfs_qm_init_quotainos(mp))) {
640 kmem_free(qinf);
641 mp->m_quotainfo = NULL;
642 return error;
643 }
644
645 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
646 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
647 mutex_init(&qinf->qi_tree_lock);
648
649 INIT_LIST_HEAD(&qinf->qi_lru_list);
650 qinf->qi_lru_count = 0;
651 mutex_init(&qinf->qi_lru_lock);
652
653 /* mutex used to serialize quotaoffs */
654 mutex_init(&qinf->qi_quotaofflock);
655
656 /* Precalc some constants */
657 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
658 ASSERT(qinf->qi_dqchunklen);
659 qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
660 do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
661
662 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
663
664 /*
665 * We try to get the limits from the superuser's limits fields.
666 * This is quite hacky, but it is standard quota practice.
667 *
668 * We look at the USR dquot with id == 0 first, but if user quotas
 * are not enabled we go to the GRP dquot with id == 0.
670 * We don't really care to keep separate default limits for user
671 * and group quotas, at least not at this point.
672 *
673 * Since we may not have done a quotacheck by this point, just read
674 * the dquot without attaching it to any hashtables or lists.
675 */
676 error = xfs_qm_dqread(mp, 0,
677 XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
678 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
679 XFS_DQ_PROJ),
680 XFS_QMOPT_DOWARN, &dqp);
681 if (!error) {
682 xfs_disk_dquot_t *ddqp = &dqp->q_core;
683
684 /*
		 * The warnings and timers set the grace period given to
		 * a user or group before further writes are disallowed.
		 * If it is zero, a default is used.
688 */
689 qinf->qi_btimelimit = ddqp->d_btimer ?
690 be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
691 qinf->qi_itimelimit = ddqp->d_itimer ?
692 be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
693 qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
694 be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
695 qinf->qi_bwarnlimit = ddqp->d_bwarns ?
696 be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
697 qinf->qi_iwarnlimit = ddqp->d_iwarns ?
698 be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
699 qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
700 be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
701 qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
702 qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
703 qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
704 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
705 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
706 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
707
708 xfs_qm_dqdestroy(dqp);
709 } else {
710 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
711 qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
712 qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
713 qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
714 qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
715 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
716 }
717
718 qinf->qi_shrinker.shrink = xfs_qm_shake;
719 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
720 register_shrinker(&qinf->qi_shrinker);
721 return 0;
722}
723
724
725/*
726 * Gets called when unmounting a filesystem or when all quotas get
727 * turned off.
728 * This purges the quota inodes, destroys locks and frees itself.
729 */
730void
731xfs_qm_destroy_quotainfo(
732 xfs_mount_t *mp)
733{
734 xfs_quotainfo_t *qi;
735
736 qi = mp->m_quotainfo;
737 ASSERT(qi != NULL);
738
739 unregister_shrinker(&qi->qi_shrinker);
740
741 if (qi->qi_uquotaip) {
742 IRELE(qi->qi_uquotaip);
743 qi->qi_uquotaip = NULL; /* paranoia */
744 }
745 if (qi->qi_gquotaip) {
746 IRELE(qi->qi_gquotaip);
747 qi->qi_gquotaip = NULL;
748 }
749 mutex_destroy(&qi->qi_quotaofflock);
750 kmem_free(qi);
751 mp->m_quotainfo = NULL;
752}
753
754/*
 * Create an inode and return with a reference already taken, but unlocked;
 * this is how we create quota inodes.
757 */
758STATIC int
759xfs_qm_qino_alloc(
760 xfs_mount_t *mp,
761 xfs_inode_t **ip,
762 __int64_t sbfields,
763 uint flags)
764{
765 xfs_trans_t *tp;
766 int error;
767 int committed;
768
769 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
770 if ((error = xfs_trans_reserve(tp,
771 XFS_QM_QINOCREATE_SPACE_RES(mp),
772 XFS_CREATE_LOG_RES(mp), 0,
773 XFS_TRANS_PERM_LOG_RES,
774 XFS_CREATE_LOG_COUNT))) {
775 xfs_trans_cancel(tp, 0);
776 return error;
777 }
778
779 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
780 if (error) {
781 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
782 XFS_TRANS_ABORT);
783 return error;
784 }
785
786 /*
787 * Make the changes in the superblock, and log those too.
788 * sbfields arg may contain fields other than *QUOTINO;
789 * VERSIONNUM for example.
790 */
791 spin_lock(&mp->m_sb_lock);
792 if (flags & XFS_QMOPT_SBVERSION) {
793 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
794 ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
795 XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
796 (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
797 XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
798
799 xfs_sb_version_addquota(&mp->m_sb);
800 mp->m_sb.sb_uquotino = NULLFSINO;
801 mp->m_sb.sb_gquotino = NULLFSINO;
802
803 /* qflags will get updated _after_ quotacheck */
804 mp->m_sb.sb_qflags = 0;
805 }
806 if (flags & XFS_QMOPT_UQUOTA)
807 mp->m_sb.sb_uquotino = (*ip)->i_ino;
808 else
809 mp->m_sb.sb_gquotino = (*ip)->i_ino;
810 spin_unlock(&mp->m_sb_lock);
811 xfs_mod_sb(tp, sbfields);
812
813 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
814 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
815 return error;
816 }
817 return 0;
818}
819
820
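/*
 * Reset the counters, timers and warning counts of every dquot in this
 * buffer so quotacheck can rebuild them from scratch; the limits are
 * left alone. Also repairs any dqblks that fail the sanity check.
 */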
821STATIC void
822xfs_qm_reset_dqcounts(
823 xfs_mount_t *mp,
824 xfs_buf_t *bp,
825 xfs_dqid_t id,
826 uint type)
827{
828 xfs_disk_dquot_t *ddq;
829 int j;
830
831 trace_xfs_reset_dqcounts(bp, _RET_IP_);
832
833 /*
834 * Reset all counters and timers. They'll be
835 * started afresh by xfs_qm_quotacheck.
836 */
837#ifdef DEBUG
838 j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
839 do_div(j, sizeof(xfs_dqblk_t));
840 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
841#endif
842 ddq = bp->b_addr;
843 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
844 /*
845 * Do a sanity check, and if needed, repair the dqblk. Don't
846 * output any warnings because it's perfectly possible to
847 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
848 */
849 (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
850 "xfs_quotacheck");
851 ddq->d_bcount = 0;
852 ddq->d_icount = 0;
853 ddq->d_rtbcount = 0;
854 ddq->d_btimer = 0;
855 ddq->d_itimer = 0;
856 ddq->d_rtbtimer = 0;
857 ddq->d_bwarns = 0;
858 ddq->d_iwarns = 0;
859 ddq->d_rtbwarns = 0;
860 ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
861 }
862}
863
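/*
 * Read each dquot buffer in the given range of file system blocks, reset
 * the dquots in it, and queue the buffer on @buffer_list for delayed
 * write.
 */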
864STATIC int
865xfs_qm_dqiter_bufs(
866 struct xfs_mount *mp,
867 xfs_dqid_t firstid,
868 xfs_fsblock_t bno,
869 xfs_filblks_t blkcnt,
870 uint flags,
871 struct list_head *buffer_list)
872{
873 struct xfs_buf *bp;
874 int error;
875 int type;
876
877 ASSERT(blkcnt > 0);
878 type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
879 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
880 error = 0;
881
882 /*
883 * Blkcnt arg can be a very big number, and might even be
884 * larger than the log itself. So, we have to break it up into
885 * manageable-sized transactions.
886 * Note that we don't start a permanent transaction here; we might
887 * not be able to get a log reservation for the whole thing up front,
888 * and we don't really care to either, because we just discard
889 * everything if we were to crash in the middle of this loop.
890 */
891 while (blkcnt--) {
892 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
893 XFS_FSB_TO_DADDR(mp, bno),
894 mp->m_quotainfo->qi_dqchunklen, 0, &bp);
895 if (error)
896 break;
897
898 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
899 xfs_buf_delwri_queue(bp, buffer_list);
900 xfs_buf_relse(bp);
901 /*
		 * Advance to the next block.
903 */
904 bno++;
905 firstid += mp->m_quotainfo->qi_dqperchunk;
906 }
907
908 return error;
909}
910
911/*
912 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
913 * caller supplied function for every chunk of dquots that we find.
914 */
915STATIC int
916xfs_qm_dqiterate(
917 struct xfs_mount *mp,
918 struct xfs_inode *qip,
919 uint flags,
920 struct list_head *buffer_list)
921{
922 struct xfs_bmbt_irec *map;
923 int i, nmaps; /* number of map entries */
924 int error; /* return value */
925 xfs_fileoff_t lblkno;
926 xfs_filblks_t maxlblkcnt;
927 xfs_dqid_t firstid;
928 xfs_fsblock_t rablkno;
929 xfs_filblks_t rablkcnt;
930
931 error = 0;
932 /*
933 * This looks racy, but we can't keep an inode lock across a
934 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time, which is single-threaded.
936 */
937 if (qip->i_d.di_nblocks == 0)
938 return 0;
939
940 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
941
942 lblkno = 0;
943 maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
944 do {
945 nmaps = XFS_DQITER_MAP_SIZE;
946 /*
947 * We aren't changing the inode itself. Just changing
948 * some of its data. No new blocks are added here, and
949 * the inode is never added to the transaction.
950 */
951 xfs_ilock(qip, XFS_ILOCK_SHARED);
952 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
953 map, &nmaps, 0);
954 xfs_iunlock(qip, XFS_ILOCK_SHARED);
955 if (error)
956 break;
957
958 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
959 for (i = 0; i < nmaps; i++) {
960 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
961 ASSERT(map[i].br_blockcount);
962
963
964 lblkno += map[i].br_blockcount;
965
966 if (map[i].br_startblock == HOLESTARTBLOCK)
967 continue;
968
969 firstid = (xfs_dqid_t) map[i].br_startoff *
970 mp->m_quotainfo->qi_dqperchunk;
971 /*
972 * Do a read-ahead on the next extent.
973 */
974 if ((i+1 < nmaps) &&
975 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
976 rablkcnt = map[i+1].br_blockcount;
977 rablkno = map[i+1].br_startblock;
978 while (rablkcnt--) {
979 xfs_buf_readahead(mp->m_ddev_targp,
980 XFS_FSB_TO_DADDR(mp, rablkno),
981 mp->m_quotainfo->qi_dqchunklen);
982 rablkno++;
983 }
984 }
985 /*
986 * Iterate thru all the blks in the extent and
987 * reset the counters of all the dquots inside them.
988 */
989 error = xfs_qm_dqiter_bufs(mp, firstid,
990 map[i].br_startblock,
991 map[i].br_blockcount,
992 flags, buffer_list);
993 if (error)
994 goto out;
995 }
996 } while (nmaps > 0);
997
998out:
999 kmem_free(map);
1000 return error;
1001}
1002
1003/*
1004 * Called by dqusage_adjust in doing a quotacheck.
1005 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
1007 * as the buffer copy. This is so that once the quotacheck is done, we can
1008 * just log all the buffers, as opposed to logging numerous updates to
1009 * individual dquots.
1010 */
1011STATIC int
1012xfs_qm_quotacheck_dqadjust(
1013 struct xfs_inode *ip,
1014 xfs_dqid_t id,
1015 uint type,
1016 xfs_qcnt_t nblks,
1017 xfs_qcnt_t rtblks)
1018{
1019 struct xfs_mount *mp = ip->i_mount;
1020 struct xfs_dquot *dqp;
1021 int error;
1022
1023 error = xfs_qm_dqget(mp, ip, id, type,
1024 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1025 if (error) {
1026 /*
1027 * Shouldn't be able to turn off quotas here.
1028 */
1029 ASSERT(error != ESRCH);
1030 ASSERT(error != ENOENT);
1031 return error;
1032 }
1033
1034 trace_xfs_dqadjust(dqp);
1035
1036 /*
1037 * Adjust the inode count and the block count to reflect this inode's
1038 * resource usage.
1039 */
1040 be64_add_cpu(&dqp->q_core.d_icount, 1);
1041 dqp->q_res_icount++;
1042 if (nblks) {
1043 be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1044 dqp->q_res_bcount += nblks;
1045 }
1046 if (rtblks) {
1047 be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1048 dqp->q_res_rtbcount += rtblks;
1049 }
1050
1051 /*
1052 * Set default limits, adjust timers (since we changed usages)
1053 *
1054 * There are no timers for the default values set in the root dquot.
1055 */
1056 if (dqp->q_core.d_id) {
1057 xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
1058 xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1059 }
1060
1061 dqp->dq_flags |= XFS_DQ_DIRTY;
1062 xfs_qm_dqput(dqp);
1063 return 0;
1064}
1065
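/*
 * Count the realtime blocks allocated to an inode by walking the extent
 * records in its data fork, reading the extents in first if necessary.
 */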
1066STATIC int
1067xfs_qm_get_rtblks(
1068 xfs_inode_t *ip,
1069 xfs_qcnt_t *O_rtblks)
1070{
1071 xfs_filblks_t rtblks; /* total rt blks */
1072 xfs_extnum_t idx; /* extent record index */
1073 xfs_ifork_t *ifp; /* inode fork pointer */
1074 xfs_extnum_t nextents; /* number of extent entries */
1075 int error;
1076
1077 ASSERT(XFS_IS_REALTIME_INODE(ip));
1078 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1079 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1080 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1081 return error;
1082 }
1083 rtblks = 0;
1084 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1085 for (idx = 0; idx < nextents; idx++)
1086 rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1087 *O_rtblks = (xfs_qcnt_t)rtblks;
1088 return 0;
1089}
1090
1091/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
1093 * dquots and update them to account for resources taken by that inode.
1094 */
1095/* ARGSUSED */
1096STATIC int
1097xfs_qm_dqusage_adjust(
1098 xfs_mount_t *mp, /* mount point for filesystem */
1099 xfs_ino_t ino, /* inode number to get data for */
1100 void __user *buffer, /* not used */
1101 int ubsize, /* not used */
1102 int *ubused, /* not used */
1103 int *res) /* result code value */
1104{
1105 xfs_inode_t *ip;
1106 xfs_qcnt_t nblks, rtblks = 0;
1107 int error;
1108
1109 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1110
1111 /*
1112 * rootino must have its resources accounted for, not so with the quota
1113 * inodes.
1114 */
1115 if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
1116 *res = BULKSTAT_RV_NOTHING;
1117 return XFS_ERROR(EINVAL);
1118 }
1119
1120 /*
1121 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1122 * interface expects the inode to be exclusively locked because that's
1123 * the case in all other instances. It's OK that we do this because
1124 * quotacheck is done only at mount time.
1125 */
1126 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1127 if (error) {
1128 *res = BULKSTAT_RV_NOTHING;
1129 return error;
1130 }
1131
1132 ASSERT(ip->i_delayed_blks == 0);
1133
1134 if (XFS_IS_REALTIME_INODE(ip)) {
1135 /*
1136 * Walk thru the extent list and count the realtime blocks.
1137 */
1138 error = xfs_qm_get_rtblks(ip, &rtblks);
1139 if (error)
1140 goto error0;
1141 }
1142
1143 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1144
1145 /*
1146 * Add the (disk blocks and inode) resources occupied by this
1147 * inode to its dquots. We do this adjustment in the incore dquot,
1148 * and also copy the changes to its buffer.
1149 * We don't care about putting these changes in a transaction
1150 * envelope because if we crash in the middle of a 'quotacheck'
1151 * we have to start from the beginning anyway.
1152 * Once we're done, we'll log all the dquot bufs.
1153 *
1154 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1155 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1156 */
1157 if (XFS_IS_UQUOTA_ON(mp)) {
1158 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1159 XFS_DQ_USER, nblks, rtblks);
1160 if (error)
1161 goto error0;
1162 }
1163
1164 if (XFS_IS_GQUOTA_ON(mp)) {
1165 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1166 XFS_DQ_GROUP, nblks, rtblks);
1167 if (error)
1168 goto error0;
1169 }
1170
1171 if (XFS_IS_PQUOTA_ON(mp)) {
1172 error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1173 XFS_DQ_PROJ, nblks, rtblks);
1174 if (error)
1175 goto error0;
1176 }
1177
1178 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1179 IRELE(ip);
1180 *res = BULKSTAT_RV_DIDONE;
1181 return 0;
1182
1183error0:
1184 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1185 IRELE(ip);
1186 *res = BULKSTAT_RV_GIVEUP;
1187 return error;
1188}
1189
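/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write. Dquots that are being freed, or that are already clean,
 * are left alone.
 */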
1190STATIC int
1191xfs_qm_flush_one(
1192 struct xfs_dquot *dqp,
1193 void *data)
1194{
1195 struct list_head *buffer_list = data;
1196 struct xfs_buf *bp = NULL;
1197 int error = 0;
1198
1199 xfs_dqlock(dqp);
1200 if (dqp->dq_flags & XFS_DQ_FREEING)
1201 goto out_unlock;
1202 if (!XFS_DQ_IS_DIRTY(dqp))
1203 goto out_unlock;
1204
1205 xfs_dqflock(dqp);
1206 error = xfs_qm_dqflush(dqp, &bp);
1207 if (error)
1208 goto out_unlock;
1209
1210 xfs_buf_delwri_queue(bp, buffer_list);
1211 xfs_buf_relse(bp);
1212out_unlock:
1213 xfs_dqunlock(dqp);
1214 return error;
1215}
1216
1217/*
1218 * Walk thru all the filesystem inodes and construct a consistent view
1219 * of the disk quota world. If the quotacheck fails, disable quotas.
1220 */
1221int
1222xfs_qm_quotacheck(
1223 xfs_mount_t *mp)
1224{
1225 int done, count, error, error2;
1226 xfs_ino_t lastino;
1227 size_t structsz;
1228 xfs_inode_t *uip, *gip;
1229 uint flags;
	LIST_HEAD(buffer_list);
1231
1232 count = INT_MAX;
1233 structsz = 1;
1234 lastino = 0;
1235 flags = 0;
1236
1237 ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
1238 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1239
1240 xfs_notice(mp, "Quotacheck needed: Please wait.");
1241
1242 /*
1243 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1244 * their counters to zero. We need a clean slate.
1245 * We don't log our changes till later.
1246 */
1247 uip = mp->m_quotainfo->qi_uquotaip;
1248 if (uip) {
1249 error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1250 &buffer_list);
1251 if (error)
1252 goto error_return;
1253 flags |= XFS_UQUOTA_CHKD;
1254 }
1255
1256 gip = mp->m_quotainfo->qi_gquotaip;
1257 if (gip) {
1258 error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
1259 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
1260 &buffer_list);
1261 if (error)
1262 goto error_return;
1263 flags |= XFS_OQUOTA_CHKD;
1264 }
1265
1266 do {
1267 /*
1268 * Iterate thru all the inodes in the file system,
1269 * adjusting the corresponding dquot counters in core.
1270 */
1271 error = xfs_bulkstat(mp, &lastino, &count,
1272 xfs_qm_dqusage_adjust,
1273 structsz, NULL, &done);
1274 if (error)
1275 break;
1276
1277 } while (!done);
1278
1279 /*
1280 * We've made all the changes that we need to make incore. Flush them
1281 * down to disk buffers if everything was updated successfully.
1282 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
1287 if (XFS_IS_GQUOTA_ON(mp)) {
1288 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1289 &buffer_list);
1290 if (!error)
1291 error = error2;
1292 }
1293 if (XFS_IS_PQUOTA_ON(mp)) {
1294 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1295 &buffer_list);
1296 if (!error)
1297 error = error2;
1298 }
1299
1300 error2 = xfs_buf_delwri_submit(&buffer_list);
1301 if (!error)
1302 error = error2;
1303
1304 /*
1305 * We can get this error if we couldn't do a dquot allocation inside
1306 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1307 * dirty dquots that might be cached, we just want to get rid of them
1308 * and turn quotaoff. The dquots won't be attached to any of the inodes
1309 * at this point (because we intentionally didn't in dqget_noattach).
1310 */
1311 if (error) {
1312 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1313 goto error_return;
1314 }
1315
1316 /*
1317 * If one type of quotas is off, then it will lose its
1318 * quotachecked status, since we won't be doing accounting for
1319 * that type anymore.
1320 */
1321 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1322 mp->m_qflags |= flags;
1323
1324 error_return:
1325 while (!list_empty(&buffer_list)) {
1326 struct xfs_buf *bp =
1327 list_first_entry(&buffer_list, struct xfs_buf, b_list);
1328 list_del_init(&bp->b_list);
1329 xfs_buf_relse(bp);
1330 }
1331
1332 if (error) {
1333 xfs_warn(mp,
1334 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1335 error);
1336 /*
1337 * We must turn off quotas.
1338 */
1339 ASSERT(mp->m_quotainfo != NULL);
1340 xfs_qm_destroy_quotainfo(mp);
1341 if (xfs_mount_reset_sbqflags(mp)) {
1342 xfs_warn(mp,
1343 "Quotacheck: Failed to reset quota flags.");
1344 }
1345 } else
1346 xfs_notice(mp, "Quotacheck: Done.");
	return error;
1348}
1349
1350/*
1351 * This is called after the superblock has been read in and we're ready to
1352 * iget the quota inodes.
1353 */
1354STATIC int
1355xfs_qm_init_quotainos(
1356 xfs_mount_t *mp)
1357{
1358 xfs_inode_t *uip, *gip;
1359 int error;
1360 __int64_t sbflags;
1361 uint flags;
1362
1363 ASSERT(mp->m_quotainfo);
1364 uip = gip = NULL;
1365 sbflags = 0;
1366 flags = 0;
1367
1368 /*
1369 * Get the uquota and gquota inodes
1370 */
1371 if (xfs_sb_version_hasquota(&mp->m_sb)) {
1372 if (XFS_IS_UQUOTA_ON(mp) &&
1373 mp->m_sb.sb_uquotino != NULLFSINO) {
1374 ASSERT(mp->m_sb.sb_uquotino > 0);
1375 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1376 0, 0, &uip)))
1377 return XFS_ERROR(error);
1378 }
1379 if (XFS_IS_OQUOTA_ON(mp) &&
1380 mp->m_sb.sb_gquotino != NULLFSINO) {
1381 ASSERT(mp->m_sb.sb_gquotino > 0);
1382 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1383 0, 0, &gip))) {
1384 if (uip)
1385 IRELE(uip);
1386 return XFS_ERROR(error);
1387 }
1388 }
1389 } else {
1390 flags |= XFS_QMOPT_SBVERSION;
1391 sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1392 XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
1393 }
1394
1395 /*
1396 * Create the two inodes, if they don't exist already. The changes
1397 * made above will get added to a transaction and logged in one of
1398 * the qino_alloc calls below. If the device is readonly,
1399 * temporarily switch to read-write to do this.
1400 */
1401 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1402 if ((error = xfs_qm_qino_alloc(mp, &uip,
1403 sbflags | XFS_SB_UQUOTINO,
1404 flags | XFS_QMOPT_UQUOTA)))
1405 return XFS_ERROR(error);
1406
1407 flags &= ~XFS_QMOPT_SBVERSION;
1408 }
1409 if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
1410 flags |= (XFS_IS_GQUOTA_ON(mp) ?
1411 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
1412 error = xfs_qm_qino_alloc(mp, &gip,
1413 sbflags | XFS_SB_GQUOTINO, flags);
1414 if (error) {
1415 if (uip)
1416 IRELE(uip);
1417
1418 return XFS_ERROR(error);
1419 }
1420 }
1421
1422 mp->m_quotainfo->qi_uquotaip = uip;
1423 mp->m_quotainfo->qi_gquotaip = gip;
1424
1425 return 0;
1426}
1427
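/*
 * Remove a dquot from the radix tree and destroy it. The caller has
 * already pulled it off the LRU list.
 */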
1428STATIC void
1429xfs_qm_dqfree_one(
1430 struct xfs_dquot *dqp)
1431{
1432 struct xfs_mount *mp = dqp->q_mount;
1433 struct xfs_quotainfo *qi = mp->m_quotainfo;
1434
1435 mutex_lock(&qi->qi_tree_lock);
1436 radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
1437 be32_to_cpu(dqp->q_core.d_id));
1438
1439 qi->qi_dquots--;
1440 mutex_unlock(&qi->qi_tree_lock);
1441
1442 xfs_qm_dqdestroy(dqp);
1443}
1444
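/*
 * Try to reclaim a single dquot from the free list. Dquots that regained
 * a reference are simply removed from the list; dirty ones are flushed
 * and kept around for another pass; clean, unreferenced ones are marked
 * XFS_DQ_FREEING and moved to @dispose_list for the caller to destroy.
 */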
1445STATIC void
1446xfs_qm_dqreclaim_one(
1447 struct xfs_dquot *dqp,
1448 struct list_head *buffer_list,
1449 struct list_head *dispose_list)
1450{
1451 struct xfs_mount *mp = dqp->q_mount;
1452 struct xfs_quotainfo *qi = mp->m_quotainfo;
1453 int error;
1454
1455 if (!xfs_dqlock_nowait(dqp))
1456 goto out_busy;
1457
1458 /*
	 * This dquot has acquired a reference in the meantime; remove it from
1460 * the freelist and try again.
1461 */
1462 if (dqp->q_nrefs) {
1463 xfs_dqunlock(dqp);
1464
1465 trace_xfs_dqreclaim_want(dqp);
1466 XFS_STATS_INC(xs_qm_dqwants);
1467
1468 list_del_init(&dqp->q_lru);
1469 qi->qi_lru_count--;
1470 XFS_STATS_DEC(xs_qm_dquot_unused);
1471 return;
1472 }
1473
1474 /*
1475 * Try to grab the flush lock. If this dquot is in the process of
1476 * getting flushed to disk, we don't want to reclaim it.
1477 */
1478 if (!xfs_dqflock_nowait(dqp))
1479 goto out_busy;
1480
1481 if (XFS_DQ_IS_DIRTY(dqp)) {
1482 struct xfs_buf *bp = NULL;
1483
1484 trace_xfs_dqreclaim_dirty(dqp);
1485
1486 error = xfs_qm_dqflush(dqp, &bp);
1487 if (error) {
1488 xfs_warn(mp, "%s: dquot %p flush failed",
1489 __func__, dqp);
1490 goto out_busy;
1491 }
1492
1493 xfs_buf_delwri_queue(bp, buffer_list);
1494 xfs_buf_relse(bp);
1495 /*
1496 * Give the dquot another try on the freelist, as the
1497 * flushing will take some time.
1498 */
1499 goto out_busy;
1500 }
1501 xfs_dqfunlock(dqp);
1502
1503 /*
1504 * Prevent lookups now that we are past the point of no return.
1505 */
1506 dqp->dq_flags |= XFS_DQ_FREEING;
1507 xfs_dqunlock(dqp);
1508
1509 ASSERT(dqp->q_nrefs == 0);
1510 list_move_tail(&dqp->q_lru, dispose_list);
1511 qi->qi_lru_count--;
1512 XFS_STATS_DEC(xs_qm_dquot_unused);
1513
1514 trace_xfs_dqreclaim_done(dqp);
1515 XFS_STATS_INC(xs_qm_dqreclaims);
1516 return;
1517
1518out_busy:
1519 xfs_dqunlock(dqp);
1520
1521 /*
1522 * Move the dquot to the tail of the list so that we don't spin on it.
1523 */
1524 list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
1525
1526 trace_xfs_dqreclaim_busy(dqp);
1527 XFS_STATS_INC(xs_qm_dqreclaim_misses);
1528}
1529
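/*
 * Shrinker callback: try to reclaim up to sc->nr_to_scan dquots from the
 * LRU list, then return the remaining LRU count scaled by the VFS cache
 * pressure as an estimate of what is left to reclaim.
 */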
1530STATIC int
1531xfs_qm_shake(
1532 struct shrinker *shrink,
1533 struct shrink_control *sc)
1534{
1535 struct xfs_quotainfo *qi =
1536 container_of(shrink, struct xfs_quotainfo, qi_shrinker);
1537 int nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(buffer_list);
	LIST_HEAD(dispose_list);
1540 struct xfs_dquot *dqp;
1541 int error;
1542
1543 if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1544 return 0;
1545 if (!nr_to_scan)
1546 goto out;
1547
1548 mutex_lock(&qi->qi_lru_lock);
1549 while (!list_empty(&qi->qi_lru_list)) {
1550 if (nr_to_scan-- <= 0)
1551 break;
1552 dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
1553 q_lru);
1554 xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
1555 }
1556 mutex_unlock(&qi->qi_lru_lock);
1557
1558 error = xfs_buf_delwri_submit(&buffer_list);
1559 if (error)
1560 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
1561
1562 while (!list_empty(&dispose_list)) {
1563 dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
1564 list_del_init(&dqp->q_lru);
1565 xfs_qm_dqfree_one(dqp);
1566 }
1567
1568out:
1569 return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
1570}
1571
1572/*
1573 * Start a transaction and write the incore superblock changes to
1574 * disk. flags parameter indicates which fields have changed.
1575 */
1576int
1577xfs_qm_write_sb_changes(
1578 xfs_mount_t *mp,
1579 __int64_t flags)
1580{
1581 xfs_trans_t *tp;
1582 int error;
1583
1584 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1585 if ((error = xfs_trans_reserve(tp, 0,
1586 mp->m_sb.sb_sectsize + 128, 0,
1587 0,
1588 XFS_DEFAULT_LOG_COUNT))) {
1589 xfs_trans_cancel(tp, 0);
1590 return error;
1591 }
1592
1593 xfs_mod_sb(tp, flags);
1594 error = xfs_trans_commit(tp, 0);
1595
1596 return error;
1597}
1598
1599
1600/* --------------- utility functions for vnodeops ---------------- */
1601
1602
1603/*
 * Given an inode, a uid, gid and prid, make sure that we have
1605 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1606 * quotas by creating this file.
1607 * This also attaches dquot(s) to the given inode after locking it,
1608 * and returns the dquots corresponding to the uid and/or gid.
1609 *
1610 * in : inode (unlocked)
1611 * out : udquot, gdquot with references taken and unlocked
1612 */
1613int
1614xfs_qm_vop_dqalloc(
1615 struct xfs_inode *ip,
1616 uid_t uid,
1617 gid_t gid,
1618 prid_t prid,
1619 uint flags,
1620 struct xfs_dquot **O_udqpp,
1621 struct xfs_dquot **O_gdqpp)
1622{
1623 struct xfs_mount *mp = ip->i_mount;
1624 struct xfs_dquot *uq, *gq;
1625 int error;
1626 uint lockflags;
1627
1628 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1629 return 0;
1630
1631 lockflags = XFS_ILOCK_EXCL;
1632 xfs_ilock(ip, lockflags);
1633
1634 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1635 gid = ip->i_d.di_gid;
1636
1637 /*
1638 * Attach the dquot(s) to this inode, doing a dquot allocation
1639 * if necessary. The dquot(s) will not be locked.
1640 */
1641 if (XFS_NOT_DQATTACHED(mp, ip)) {
1642 error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1643 if (error) {
1644 xfs_iunlock(ip, lockflags);
1645 return error;
1646 }
1647 }
1648
1649 uq = gq = NULL;
1650 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1651 if (ip->i_d.di_uid != uid) {
1652 /*
1653 * What we need is the dquot that has this uid, and
1654 * if we send the inode to dqget, the uid of the inode
1655 * takes priority over what's sent in the uid argument.
			 * We must unlock the inode here before calling dqget
			 * if we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding the ilock.
1660 */
1661 xfs_iunlock(ip, lockflags);
1662 if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
1663 XFS_DQ_USER,
1664 XFS_QMOPT_DQALLOC |
1665 XFS_QMOPT_DOWARN,
1666 &uq))) {
1667 ASSERT(error != ENOENT);
1668 return error;
1669 }
1670 /*
1671 * Get the ilock in the right order.
1672 */
1673 xfs_dqunlock(uq);
1674 lockflags = XFS_ILOCK_SHARED;
1675 xfs_ilock(ip, lockflags);
1676 } else {
1677 /*
1678 * Take an extra reference, because we'll return
1679 * this to caller
1680 */
1681 ASSERT(ip->i_udquot);
1682 uq = xfs_qm_dqhold(ip->i_udquot);
1683 }
1684 }
1685 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1686 if (ip->i_d.di_gid != gid) {
1687 xfs_iunlock(ip, lockflags);
1688 if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
1689 XFS_DQ_GROUP,
1690 XFS_QMOPT_DQALLOC |
1691 XFS_QMOPT_DOWARN,
1692 &gq))) {
1693 if (uq)
1694 xfs_qm_dqrele(uq);
1695 ASSERT(error != ENOENT);
1696 return error;
1697 }
1698 xfs_dqunlock(gq);
1699 lockflags = XFS_ILOCK_SHARED;
1700 xfs_ilock(ip, lockflags);
1701 } else {
1702 ASSERT(ip->i_gdquot);
1703 gq = xfs_qm_dqhold(ip->i_gdquot);
1704 }
1705 } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1706 if (xfs_get_projid(ip) != prid) {
1707 xfs_iunlock(ip, lockflags);
1708 if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1709 XFS_DQ_PROJ,
1710 XFS_QMOPT_DQALLOC |
1711 XFS_QMOPT_DOWARN,
1712 &gq))) {
1713 if (uq)
1714 xfs_qm_dqrele(uq);
1715 ASSERT(error != ENOENT);
				return error;
1717 }
1718 xfs_dqunlock(gq);
1719 lockflags = XFS_ILOCK_SHARED;
1720 xfs_ilock(ip, lockflags);
1721 } else {
1722 ASSERT(ip->i_gdquot);
1723 gq = xfs_qm_dqhold(ip->i_gdquot);
1724 }
1725 }
1726 if (uq)
1727 trace_xfs_dquot_dqalloc(ip);
1728
1729 xfs_iunlock(ip, lockflags);
1730 if (O_udqpp)
1731 *O_udqpp = uq;
1732 else if (uq)
1733 xfs_qm_dqrele(uq);
1734 if (O_gdqpp)
1735 *O_gdqpp = gq;
1736 else if (gq)
1737 xfs_qm_dqrele(gq);
1738 return 0;
1739}
1740
1741/*
1742 * Actually transfer ownership, and do dquot modifications.
1743 * These were already reserved.
1744 */
1745xfs_dquot_t *
1746xfs_qm_vop_chown(
1747 xfs_trans_t *tp,
1748 xfs_inode_t *ip,
1749 xfs_dquot_t **IO_olddq,
1750 xfs_dquot_t *newdq)
1751{
1752 xfs_dquot_t *prevdq;
1753 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1754 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1755
1756
1757 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1758 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1759
1760 /* old dquot */
1761 prevdq = *IO_olddq;
1762 ASSERT(prevdq);
1763 ASSERT(prevdq != newdq);
1764
1765 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1766 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1767
1768 /* the sparkling new dquot */
1769 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1770 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1771
1772 /*
1773 * Take an extra reference, because the inode is going to keep
1774 * this dquot pointer even after the trans_commit.
1775 */
1776 *IO_olddq = xfs_qm_dqhold(newdq);
1777
1778 return prevdq;
1779}
1780
1781/*
1782 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1783 */
1784int
1785xfs_qm_vop_chown_reserve(
1786 xfs_trans_t *tp,
1787 xfs_inode_t *ip,
1788 xfs_dquot_t *udqp,
1789 xfs_dquot_t *gdqp,
1790 uint flags)
1791{
1792 xfs_mount_t *mp = ip->i_mount;
1793 uint delblks, blkflags, prjflags = 0;
1794 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
1795 int error;
1796
1797
1798 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1799 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1800
1801 delblks = ip->i_delayed_blks;
1802 delblksudq = delblksgdq = unresudq = unresgdq = NULL;
1803 blkflags = XFS_IS_REALTIME_INODE(ip) ?
1804 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1805
1806 if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1807 ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
1808 delblksudq = udqp;
1809 /*
1810 * If there are delayed allocation blocks, then we have to
1811 * unreserve those from the old dquot, and add them to the
1812 * new dquot.
1813 */
1814 if (delblks) {
1815 ASSERT(ip->i_udquot);
1816 unresudq = ip->i_udquot;
1817 }
1818 }
1819 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
1820 if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
1821 xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
1822 prjflags = XFS_QMOPT_ENOSPC;
1823
1824 if (prjflags ||
1825 (XFS_IS_GQUOTA_ON(ip->i_mount) &&
1826 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
1827 delblksgdq = gdqp;
1828 if (delblks) {
1829 ASSERT(ip->i_gdquot);
1830 unresgdq = ip->i_gdquot;
1831 }
1832 }
1833 }
1834
1835 if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1836 delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
1837 flags | blkflags | prjflags)))
		return error;
1839
1840 /*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by the
	 * transaction code, so we have to undo them manually here.
1845 */
1846 if (delblks) {
1847 /*
1848 * Do the reservations first. Unreservation can't fail.
1849 */
1850 ASSERT(delblksudq || delblksgdq);
1851 ASSERT(unresudq || unresgdq);
1852 if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1853 delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
1854 flags | blkflags | prjflags)))
			return error;
1856 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1857 unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
1858 blkflags);
1859 }
1860
	return 0;
1862}
1863
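/*
 * Make sure each distinct inode involved in a rename has its dquots
 * attached before the operation starts.
 */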
1864int
1865xfs_qm_vop_rename_dqattach(
1866 struct xfs_inode **i_tab)
1867{
1868 struct xfs_mount *mp = i_tab[0]->i_mount;
1869 int i;
1870
1871 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1872 return 0;
1873
1874 for (i = 0; (i < 4 && i_tab[i]); i++) {
1875 struct xfs_inode *ip = i_tab[i];
1876 int error;
1877
1878 /*
1879 * Watch out for duplicate entries in the table.
1880 */
1881 if (i == 0 || ip != i_tab[i-1]) {
1882 if (XFS_NOT_DQATTACHED(mp, ip)) {
1883 error = xfs_qm_dqattach(ip, 0);
1884 if (error)
1885 return error;
1886 }
1887 }
1888 }
1889 return 0;
1890}
1891
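/*
 * Attach the dquots reserved for a newly created inode and account the
 * new inode against them in the given transaction.
 */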
1892void
1893xfs_qm_vop_create_dqattach(
1894 struct xfs_trans *tp,
1895 struct xfs_inode *ip,
1896 struct xfs_dquot *udqp,
1897 struct xfs_dquot *gdqp)
1898{
1899 struct xfs_mount *mp = tp->t_mountp;
1900
1901 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1902 return;
1903
1904 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1905 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1906
1907 if (udqp) {
1908 ASSERT(ip->i_udquot == NULL);
1909 ASSERT(XFS_IS_UQUOTA_ON(mp));
1910 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
1911
1912 ip->i_udquot = xfs_qm_dqhold(udqp);
1913 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1914 }
1915 if (gdqp) {
1916 ASSERT(ip->i_gdquot == NULL);
1917 ASSERT(XFS_IS_OQUOTA_ON(mp));
1918 ASSERT((XFS_IS_GQUOTA_ON(mp) ?
1919 ip->i_d.di_gid : xfs_get_projid(ip)) ==
1920 be32_to_cpu(gdqp->q_core.d_id));
1921
1922 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1923 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1924 }
1925}
1926