// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

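/*
 * A minimal sketch of the other half of that contract: the caller supplies a
 * formatter that copies each struct xfs_bulkstat somewhere (usually to
 * userspace) and tells the walk when it has run out of room.  This example is
 * illustrative only -- the real formatters live with the ioctl code, and the
 * ubuffer field and the -ECANCELED "buffer full" convention are assumptions
 * inferred from how the return value is consumed below.
 *
 *	static int example_bulkstat_fmt(struct xfs_ibulk *breq,
 *					const struct xfs_bulkstat *bstat)
 *	{
 *		if (copy_to_user(breq->ubuffer, bstat, sizeof(*bstat)))
 *			return -EFAULT;
 *		breq->ubuffer += sizeof(*bstat);
 *		breq->ocount++;
 *		return breq->ocount >= breq->icount ? -ECANCELED : 0;
 *	}
 */
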
struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;
	struct xfs_ibulk	*breq;
	struct xfs_bulkstat	*buf;
};

/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->lastino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed out
 * from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct user_namespace	*sb_userns = mp->m_super->s_user_ns;
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bulkstat	*buf = bc->buf;
	xfs_extnum_t		nextents;
	int			error = -EINVAL;
	vfsuid_t		vfsuid;
	vfsgid_t		vfsgid;

	if (xfs_internal_inum(mp, ino))
		goto out_advance;

	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);
	vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
	vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projectid = ip->i_projid;
	buf->bs_ino = ino;
	buf->bs_uid = from_kuid(sb_userns, vfsuid_into_kuid(vfsuid));
	buf->bs_gid = from_kgid(sb_userns, vfsgid_into_kgid(vfsgid));
	buf->bs_size = ip->i_disk_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime = inode->i_atime.tv_sec;
	buf->bs_atime_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime = inode->i_mtime.tv_sec;
	buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime = inode->i_ctime.tv_sec;
	buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize_blks = ip->i_extsize;

	nextents = xfs_ifork_nextents(&ip->i_df);
	if (!(bc->breq->flags & XFS_IBULK_NREXT64))
		buf->bs_extents = min(nextents, XFS_MAX_EXTCNT_DATA_FORK_SMALL);
	else
		buf->bs_extents64 = nextents;

	xfs_bulkstat_health(ip, buf);
	buf->bs_aextents = xfs_ifork_nextents(&ip->i_af);
	buf->bs_forkoff = xfs_inode_fork_boff(ip);
	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

	if (xfs_has_v3inodes(mp)) {
		buf->bs_btime = ip->i_crtime.tv_sec;
		buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize_blks = ip->i_cowextsize;
	}

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = bc->formatter(bc->breq, buf);
	if (error == -ECANCELED)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}

	ASSERT(breq->icount == 1);

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_bulkstat_one_int(breq->mp, breq->mnt_userns, tp,
			breq->startino, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * If we reported one inode to userspace then we abort because we hit
	 * the end of the buffer.  Don't leak that back to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}

static int
xfs_bulkstat_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_bstat_chunk	*bc = data;
	int			error;

	error = xfs_bulkstat_one_int(mp, bc->breq->mnt_userns, tp, ino, data);
	/* bulkstat just skips over missing inodes */
	if (error == -ENOENT || error == -EINVAL)
		return 0;
	return error;
}

/*
 * Check the incoming lastino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us
 * to the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
	struct xfs_mount	*mp,
	xfs_ino_t		startino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);

	return agno >= mp->m_sb.sb_agcount ||
	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
}

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	unsigned int		iwalk_flags = 0;
	int			error;

	if (breq->mnt_userns != &init_user_ns) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	if (breq->flags & XFS_IBULK_SAME_AG)
		iwalk_flags |= XFS_IWALK_SAME_AG;

	error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,
			xfs_bulkstat_iwalk, breq->icount, &bc);
	xfs_trans_cancel(tp);
out:
	kmem_free(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

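/*
 * A rough sketch of how a caller is expected to drive xfs_bulkstat().  The
 * ioctl layer owns the real setup; the ubuffer field and the exact cursor
 * handling shown here are assumptions based on how startino, icount and
 * ocount are used in this file.
 *
 *	struct xfs_ibulk	breq = {
 *		.mp		= mp,
 *		.mnt_userns	= &init_user_ns,
 *		.ubuffer	= user_buffer,
 *		.startino	= cursor,	// resume point from the last call
 *		.icount		= nr_records,	// records that fit in user_buffer
 *		.ocount		= 0,
 *	};
 *	int error = xfs_bulkstat(&breq, example_bulkstat_fmt);
 *
 * On return, breq.ocount records were formatted and breq.startino is the new
 * cursor to hand back to userspace for the next call.
 */
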
/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
	struct xfs_mount	*mp,
	struct xfs_bstat	*bs1,
	const struct xfs_bulkstat *bstat)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(bs1, 0, sizeof(struct xfs_bstat));
	bs1->bs_ino = bstat->bs_ino;
	bs1->bs_mode = bstat->bs_mode;
	bs1->bs_nlink = bstat->bs_nlink;
	bs1->bs_uid = bstat->bs_uid;
	bs1->bs_gid = bstat->bs_gid;
	bs1->bs_rdev = bstat->bs_rdev;
	bs1->bs_blksize = bstat->bs_blksize;
	bs1->bs_size = bstat->bs_size;
	bs1->bs_atime.tv_sec = bstat->bs_atime;
	bs1->bs_mtime.tv_sec = bstat->bs_mtime;
	bs1->bs_ctime.tv_sec = bstat->bs_ctime;
	bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
	bs1->bs_blocks = bstat->bs_blocks;
	bs1->bs_xflags = bstat->bs_xflags;
	bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
	bs1->bs_extents = bstat->bs_extents;
	bs1->bs_gen = bstat->bs_gen;
	bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bstat->bs_forkoff;
	bs1->bs_projid_hi = bstat->bs_projectid >> 16;
	bs1->bs_sick = bstat->bs_sick;
	bs1->bs_checked = bstat->bs_checked;
	bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bstat->bs_aextents;
}

struct xfs_inumbers_chunk {
	inumbers_fmt_pf		formatter;
	struct xfs_ibulk	*breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

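/*
 * A worked example of that mapping: an inobt record with ir_count = 64,
 * ir_freecount = 3 and ir_free = 0x7 (the first three inodes of the chunk
 * free) is exported as xi_alloccount = 61 and xi_allocmask = ~0x7, i.e. a set
 * bit in xi_allocmask means "this inode is allocated".
 */
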
/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, lastino is the inode cursor as we walk
 * through the filesystem so we move it forward unless there was a runtime
 * error.  If the formatter tells us the buffer is now full we also move the
 * cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	const struct xfs_inobt_rec_incore *irec,
	void			*data)
{
	struct xfs_inumbers	inogrp = {
		.xi_startino	= XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
		.xi_alloccount	= irec->ir_count - irec->ir_freecount,
		.xi_allocmask	= ~irec->ir_free,
		.xi_version	= XFS_INUMBERS_VERSION_V5,
	};
	struct xfs_inumbers_chunk *ic = data;
	int			error;

	error = ic->formatter(ic->breq, &inogrp);
	if (error && error != -ECANCELED)
		return error;

	ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
			XFS_INODES_PER_CHUNK;
	return error;
}

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_inumbers_chunk ic = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error = 0;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
			xfs_inumbers_walk, breq->icount, &ic);
	xfs_trans_cancel(tp);
out:

	/*
	 * We found some inode groups, so clear the error status and return
	 * them.  The lastino pointer will point directly at the inode that
	 * triggered any error that occurred, so on the next call the error
	 * will be triggered again and propagated to userspace as there will
	 * be no formatted inode groups in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
	struct xfs_inogrp	*ig1,
	const struct xfs_inumbers *ig)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(ig1, 0, sizeof(struct xfs_inogrp));
	ig1->xi_startino = ig->xi_startino;
	ig1->xi_alloccount = ig->xi_alloccount;
	ig1->xi_allocmask = ig->xi_allocmask;
}
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

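/* Is this inode one of the filesystem's own private metadata inodes? */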
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 xfs_is_quota_inode(&mp->m_sb, ino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or negative error */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Look up the inode chunk that the given inode lives in and then get the
 * record if we found the chunk.  If the inode was not the last in the chunk
 * and there are some inodes left allocated after it, update the data for the
 * pointed-to record and return the count of grabbed inodes.
 */
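/*
 * For example (hypothetical numbers): if the chunk starts at agino 64 and the
 * caller's cursor is agino 67, idx works out to 4, so inodes 64-67 are marked
 * free in the copied record and *icount only counts the allocated inodes from
 * agino 68 onwards.
 */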
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the inode in request */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		*icount = 0;
		return 0;
	}

	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int i;

		/*
		 * We found the right chunk and it still has inodes allocated
		 * after our resume point.  Mark everything at or before the
		 * resume point free in the copied record -- those inodes are
		 * not interesting to this call.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}

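/* Does the user's buffer still have room for at least one more stat struct? */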
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
	char		__user **ac_ubuffer;/* pointer into user's buffer */
	int		ac_ubleft;	/* bytes left in user's buffer */
	int		ac_ubelem;	/* spaces used in user's buffer */
};

/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp,
	xfs_agino_t			*last_agino)
{
	char				__user **ubufp = acp->ac_ubuffer;
	int				chunkidx;
	int				error = 0;
	xfs_agino_t			agino = irbp->ir_startino;

	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
	     chunkidx++, agino++) {
		int		fmterror;
		int		ubused;

		/* inode won't fit in buffer, we are done */
		if (acp->ac_ubleft < statstruct_size)
			break;

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
			continue;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

		if (fmterror == BULKSTAT_RV_GIVEUP ||
		    (error && error != -ENOENT && error != -EINVAL)) {
			acp->ac_ubleft = 0;
			ASSERT(error);
			break;
		}

		/* be careful not to leak error if at end of chunk */
		if (fmterror == BULKSTAT_RV_NOTHING || error) {
			error = 0;
			continue;
		}

		*ubufp += ubused;
		acp->ac_ubleft -= ubused;
		acp->ac_ubelem++;
	}

	/*
	 * Post-update *last_agino.  At this point, agino will always point
	 * one inode past the last inode we processed successfully.  Hence we
	 * subtract that inode when setting the *last_agino cursor so that we
	 * return the correct cookie to userspace.  On the next bulkstat call,
	 * the inode under the lastino cookie will be skipped as we have
	 * already processed it here.
	 */
	*last_agino = agino - 1;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_ino_t		*lastinop,	/* last inode returned */
	int			*ubcountp,	/* size of buffer/count returned */
	bulkstat_one_pf		formatter,	/* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)		/* 1 if there are more stats to get */
{
	xfs_buf_t		*agbp;		/* agi header buffer */
	xfs_agino_t		agino;		/* inode # in allocation group */
	xfs_agnumber_t		agno;		/* allocation group number */
	xfs_btree_cur_t		*cur;		/* btree cursor for ialloc btree */
	size_t			irbsize;	/* size of irec buffer in bytes */
	xfs_inobt_rec_incore_t	*irbuf;		/* start of irec buffer */
	int			nirbuf;		/* size of irbuf */
	int			ubcount;	/* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return -ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore	*irbp = irbuf;
		struct xfs_inobt_rec_incore	*irbufend = irbuf + nirbuf;
		bool				end_of_ag = false;
		int				icount = 0;
		int				stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore	r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore	r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold any
		 * of the locks these represent when calling iget. If there is a
		 * pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, error ?
				     XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer. The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}

int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,/* mount point for filesystem */
	xfs_ino_t		*lastino,/* last inode returned */
	int			*count,/* size of buffer/count returned */
	void			__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}