// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_health.h"
#include "xfs_da_format.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches. Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

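			/*
			 * Advance the search index past this dquot so that
			 * the next gang lookup resumes after it, even if the
			 * execute callback asks us to skip it and retry.
			 */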
			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}


/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care about
	 * the dirty metadata sitting in this dquot. OTOH, if we're
	 * unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_dquot_use_attached_buf(dqp, &bp);
		if (error == -EAGAIN) {
			xfs_dqfunlock(dqp);
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		if (!bp)
			goto out_funlock;

		/*
		 * dqflush completes dqflock on error, and the bwrite ioend
		 * does it on success.
		 */
		error = xfs_qm_dqflush(dqp, bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}
	xfs_dquot_detach_buf(dqp);

out_funlock:
	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp)
{
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp);
		xfs_qm_destroy_quotainfo(mp);
	}
}

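/*
 * Detach any dquots attached to the realtime bitmap and summary inodes.
 * Only pre-rtgroups filesystems attach quotas to these inodes.
 */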
static void
xfs_qm_unmount_rt(
	struct xfs_mount	*mp)
{
	struct xfs_rtgroup	*rtg = xfs_rtgroup_grab(mp, 0);

	if (!rtg)
		return;
	if (rtg->rtg_inodes[XFS_RTGI_BITMAP])
		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]);
	if (rtg->rtg_inodes[XFS_RTGI_SUMMARY])
		xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]);
	xfs_rtgroup_rele(rtg);
}

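/*
 * Drop our references to the quota inodes, including the metadir parent.
 */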
STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	if (qi->qi_dirip) {
		xfs_irele(qi->qi_dirip);
		qi->qi_dirip = NULL;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);

	/*
	 * For pre-RTG file systems, the RT inodes have quotas attached;
	 * detach them now.
	 */
	if (!xfs_has_rtgroups(mp))
		xfs_qm_unmount_rt(mp);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo)
		xfs_qm_destroy_quotainos(mp->m_quotainfo);
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This makes the code look weird, but it keeps the logic
	 * a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * the dquot and returns it locked. This can return ENOENT if the
	 * dquot didn't exist on disk and we didn't ask it to allocate; ESRCH
	 * if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

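/*
 * Decide whether this inode needs dquots attached: quota must be enabled,
 * the inode must not already have its dquots, and it must not be a quota
 * inode or a metadata directory inode.
 */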
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	if (xfs_is_metadir_inode(ip))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!xfs_is_metadir_inode(ip));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	return error;
}

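/*
 * Attach dquots to an inode, taking and releasing the ILOCK ourselves.
 */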
int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	void			*arg)
		__releases(&lru->lock) __acquires(&lru->lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * If something else is freeing this dquot and hasn't yet removed it
	 * from the LRU, leave it for the freeing task to complete the freeing
	 * process rather than risk it being freed from under us here.
	 */
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_miss_unlock;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_miss_unlock;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(&lru->lock);

		error = xfs_dquot_use_attached_buf(dqp, &bp);
		if (!bp || error == -EAGAIN) {
			xfs_dqfunlock(dqp);
			goto out_unlock_dirty;
		}

		/*
		 * dqflush completes dqflock on error, and the delwri ioend
		 * does it on success.
		 */
		error = xfs_qm_dqflush(dqp, bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}

	xfs_dquot_detach_buf(dqp);
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_unlock:
	xfs_dqunlock(dqp);
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	return LRU_RETRY;
}

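/*
 * Reclaim dquots from the LRU: flush dirty dquots to their backing buffers,
 * submit the resulting delwri list, then free every clean dquot we isolated.
 */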
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = shrink->private_data;
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

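/* Report the number of unused dquots sitting on the LRU to the shrinker. */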
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = shrink->private_data;

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

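/*
 * Copy the default resource limits for this quota type out of the id-zero
 * dquot, which is where the administrator's defaults are stored on disk.
 */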
STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have already been set, so let's just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before they can no longer write. If it is zero, a default
	 * is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}

static int
xfs_qm_load_metadir_qinos(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_dqinode_load_parent(tp, &qi->qi_dirip);
	if (error == -ENOENT) {
		/* no quota directory yet, but we'll create one later */
		error = 0;
		goto out_trans;
	}
	if (error)
		goto out_trans;

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER,
				&qi->qi_uquotaip);
		if (error && error != -ENOENT)
			goto out_trans;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP,
				&qi->qi_gquotaip);
		if (error && error != -ENOENT)
			goto out_trans;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ,
				&qi->qi_pquotaip);
		if (error && error != -ENOENT)
			goto out_trans;
	}

	error = 0;
out_trans:
	xfs_trans_cancel(tp);
	return error;
}

/* Create quota inodes in the metadata directory tree. */
STATIC int
xfs_qm_create_metadir_qinos(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi)
{
	int			error;

	if (!qi->qi_dirip) {
		error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip);
		if (error && error != -EEXIST)
			return error;
		/*
		 * If the /quotas dirent points to an inode that isn't
		 * loadable, qi_dirip will be NULL but mkdir_parent will return
		 * -EEXIST. In this case the metadir is corrupt, so bail out.
		 */
		if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL))
			return -EFSCORRUPTED;
	}

	if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) {
		error = xfs_dqinode_metadir_create(qi->qi_dirip,
				XFS_DQTYPE_USER, &qi->qi_uquotaip);
		if (error)
			return error;
	}

	if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) {
		error = xfs_dqinode_metadir_create(qi->qi_dirip,
				XFS_DQTYPE_GROUP, &qi->qi_gquotaip);
		if (error)
			return error;
	}

	if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) {
		error = xfs_dqinode_metadir_create(qi->qi_dirip,
				XFS_DQTYPE_PROJ, &qi->qi_pquotaip);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for
 * creating quota files on a metadir filesystem.
 */
STATIC int
xfs_qm_prep_metadir_sb(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
	if (error)
		return error;

	spin_lock(&mp->m_sb_lock);

	xfs_add_quota(mp);

	/* qflags will get updated fully _after_ quotacheck */
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;

	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	return xfs_trans_commit(tp);
}

/*
 * Load existing quota inodes or create them. Since this is a V5 filesystem,
 * we don't have to deal with the grp/prjquota switcheroo thing from V4.
 */
STATIC int
xfs_qm_init_metadir_qinos(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_has_quota(mp)) {
		error = xfs_qm_prep_metadir_sb(mp);
		if (error)
			return error;
	}

	error = xfs_qm_load_metadir_qinos(mp, qi);
	if (error)
		goto out_err;

	error = xfs_qm_create_metadir_qinos(mp, qi);
	if (error)
		goto out_err;

	/* The only user of the quota dir inode is online fsck */
#if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
	xfs_irele(qi->qi_dirip);
	qi->qi_dirip = NULL;
#endif
	return 0;
out_err:
	xfs_qm_destroy_quotainos(mp->m_quotainfo);
	return error;
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
					GFP_KERNEL | __GFP_NOFAIL);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if (xfs_has_metadir(mp))
		error = xfs_qm_init_metadir_qinos(mp);
	else
		error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
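	/*
	 * Grace period expirations are clamped to the range supported by
	 * the ondisk dquot timestamp encoding, which differs between
	 * bigtime and classic filesystems.
	 */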
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
					   mp->m_super->s_id);
	if (!qinf->qi_shrinker) {
		error = -ENOMEM;
		goto out_free_inos;
	}

	qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker->private_data = qinf;

	shrinker_register(qinf->qi_shrinker);

	xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
	xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kfree(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees the quotainfo.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	shrinker_free(qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kfree(qi);
	mp->m_quotainfo = NULL;
}

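/* Map quota option flags to the corresponding metadata file type. */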
static inline enum xfs_metafile_type
xfs_qm_metafile_type(
	unsigned int		flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		return XFS_METAFILE_USRQUOTA;
	else if (flags & XFS_QMOPT_GQUOTA)
		return XFS_METAFILE_GRPQUOTA;
	return XFS_METAFILE_PRJQUOTA;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	enum xfs_metafile_type	metafile_type = xfs_qm_metafile_type(flags);
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
	    (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
		    (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
				return -EFSCORRUPTED;
			}
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			   (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO)) {
				xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
				return -EFSCORRUPTED;
			}
		}
		if (ino != NULLFSINO) {
			error = xfs_metafile_iget(mp, ino, metafile_type, ipp);
			if (error)
				return error;

			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		struct xfs_icreate_args	args = {
			.mode		= S_IFREG,
			.flags		= XFS_ICREATE_UNLINKABLE,
		};
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, &args, &ino);
		if (!error)
			error = xfs_icreate(tp, ino, &args, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
		if (xfs_has_metadir(mp))
			xfs_metafile_set_iflag(tp, *ipp, metafile_type);
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc) {
		xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
		xfs_finish_inode_setup(*ipp);
	}
	return error;
}


STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an -EFSCORRUPTED
		 * here. If this occurs, re-read without CRC validation so
		 * that we can repair the damage via xfs_qm_reset_dqcounts().
		 * This process will leave a trace in the log indicating
		 * corruption has been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
			GFP_KERNEL | __GFP_NOFAIL);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kfree(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id, this updates both the incore dquot and
 * the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	error = xfs_dquot_attach_buf(NULL, dqp);
	if (error)
		return error;

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_filblks_t		nblks, rtblks;
	unsigned int		lock_mode;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	/*
	 * Reload the incore unlinked list to avoid failure in inodegc.
	 * Use an unlocked check here because unrecovered unlinked inodes
	 * should be somewhat rare.
	 */
	if (xfs_inode_unlinked_incomplete(ip)) {
		error = xfs_inode_reload_unlinked(ip);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto error0;
		}
	}

	/* Metadata directory files are not accounted to user-visible quotas. */
	if (xfs_is_metadir_inode(ip))
		goto error0;

	ASSERT(ip->i_delayed_blks == 0);

	lock_mode = xfs_ilock_data_map_shared(ip);
	if (XFS_IS_REALTIME_INODE(ip)) {
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error) {
			xfs_iunlock(ip, lock_mode);
			goto error0;
		}
	}
	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
	xfs_iunlock(ip, lock_mode);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

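/*
 * Flush one dirty dquot to its backing buffer during quotacheck, cycling
 * the buffer through the delwri list if the flush lock is already held.
 */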
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_dquot_use_attached_buf(dqp, &bp);
	if (error)
		goto out_unlock;
	if (!bp) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, bp);
	if (!error)
		xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	xfs_set_quotacheck_running(mp);
	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	xfs_clear_quotacheck_running(mp);

	/*
	 * On error, the inode walk may have partially populated the dquot
	 * caches. We must purge them before disabling quota and tearing down
	 * the quotainfo, or else the dquots will leak.
	 */
	if (error)
		goto error_purge;

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error)
		goto error_purge;

	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
		xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	} else {
		xfs_notice(mp, "Quotacheck: Done.");
		xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
	}

	return error;

error_purge:
	/*
	 * On error, we may have inodes queued for inactivation. This may try
	 * to attach dquots to the inode before running cleanup operations on
	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
	 * pending inodegc operations before we purge the dquots from memory,
	 * ensuring that background inactivation is idle whilst we turn off
	 * quotas.
	 */
	xfs_inodegc_flush(mp);
	xfs_qm_dqpurge_all(mp);
	goto error_return;
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quota on realtime volumes is not supported, disable quotas
	 * immediately. We only support rtquota if rtgroups are enabled to
	 * avoid problems with older kernels.
	 */
	if (mp->m_sb.sb_rextents && !xfs_has_rtgroups(mp)) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quota is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error);
		return;
	}
}

/*
 * Load the inode for a given type of quota, assuming that the sb fields have
 * been sorted out. This is not true when switching quota types on a V4
 * filesystem, so do not use this function for that.
 *
 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
 * success; or a negative errno.
 */
int
xfs_qm_qino_load(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_inode	**ipp)
{
	struct xfs_trans	*tp;
	struct xfs_inode	*dp = NULL;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	if (xfs_has_metadir(mp)) {
		error = xfs_dqinode_load_parent(tp, &dp);
		if (error)
			goto out_cancel;
	}

	error = xfs_dqinode_load(tp, dp, type, ipp);
	if (dp)
		xfs_irele(dp);
out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

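/* Remove a dquot from the mount's radix tree and destroy it. */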
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_metadir_inode(ip));

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}
2075
2076/*
2077 * Actually transfer ownership, and do dquot modifications.
2078 * These were already reserved.
2079 */
2080struct xfs_dquot *
2081xfs_qm_vop_chown(
2082 struct xfs_trans *tp,
2083 struct xfs_inode *ip,
2084 struct xfs_dquot **IO_olddq,
2085 struct xfs_dquot *newdq)
2086{
2087 struct xfs_dquot *prevdq;
2088 xfs_filblks_t dblocks, rblocks;
2089 bool isrt = XFS_IS_REALTIME_INODE(ip);
2090
2091 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
2092 ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
2093 ASSERT(!xfs_is_metadir_inode(ip));
2094
2095 /* old dquot */
2096 prevdq = *IO_olddq;
2097 ASSERT(prevdq);
2098 ASSERT(prevdq != newdq);
2099
	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);

	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
			-(xfs_qcnt_t)dblocks);
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
			-(xfs_qcnt_t)rblocks);
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq,
			isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot.  We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation.  Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	if (isrt) {
		ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
		prevdq->q_rtb.reserved -= ip->i_delayed_blks;
	} else {
		ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
		prevdq->q_blk.reserved -= ip->i_delayed_blks;
	}
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

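/*
 * Attach dquots to all of the inodes involved in a rename before the
 * rename transaction starts.  The table holds up to four inodes (the two
 * parent directories, the source, and an existing target) and may contain
 * duplicate entries.
 */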
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

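/*
 * Attach the dquots that were reserved for a newly created inode to that
 * inode and charge the new inode against them.  The caller's references
 * to the dquots are not consumed; we take our own via xfs_qm_dqhold().
 */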
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(!xfs_is_metadir_inode(ip));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}

/* Decide if this inode's dquot is near an enforcement boundary. */
bool
xfs_inode_near_dquot_enforcement(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;
	int64_t			freesp;

	/* We only care about quotas that are enabled and enforced. */
	dqp = xfs_inode_dquot(ip, type);
	if (!dqp || !xfs_dquot_is_enforced(dqp))
		return false;

	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
	    xfs_dquot_res_over_limits(&dqp->q_blk) ||
	    xfs_dquot_res_over_limits(&dqp->q_rtb))
		return true;

	if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dqp->q_rtb;
		pre = &dqp->q_rtb_prealloc;
	} else {
		res = &dqp->q_blk;
		pre = &dqp->q_blk_prealloc;
	}

	/* Check the preallocation thresholds for the chosen space resource. */
	if (!pre->q_prealloc_hi_wmark)
		return false;

	if (res->reserved < pre->q_prealloc_lo_wmark)
		return false;

	if (res->reserved >= pre->q_prealloc_hi_wmark)
		return true;

	/*
	 * Between the watermarks: consider the dquot near enforcement once
	 * the space remaining below the high watermark drops under the 5%
	 * low-space threshold.
	 */
	freesp = pre->q_prealloc_hi_wmark - res->reserved;
	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT])
		return true;

	return false;
}