fs/xfs/xfs_itable.c — v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */

struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;
	struct xfs_ibulk	*breq;
	struct xfs_bulkstat	*buf;
};
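
/*
 * Illustrative sketch, not part of this file: the shape of a
 * bulkstat_one_fmt_pf formatter that consumes the buf above.  The real
 * v5 formatter lives in fs/xfs/xfs_ioctl.c; the breq->ubuffer field and
 * the xfs_ibulk_advance() helper are assumptions based on xfs_itable.h.
 * Returning a negative errno stops the walk; the -ECANCELED "buffer
 * full" convention is described in the comments further down.
 */
static int
example_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	/* Copy one record out and bump the output cursor/count. */
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}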

static inline bool
want_metadir_file(
	struct xfs_inode	*ip,
	struct xfs_ibulk	*breq)
{
	return xfs_is_metadir_inode(ip) && (breq->flags & XFS_IBULK_METADIR);
}

/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->lastino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct mnt_idmap	*idmap,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct user_namespace	*sb_userns = mp->m_super->s_user_ns;
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bulkstat	*buf = bc->buf;
	xfs_extnum_t		nextents;
	int			error = -EINVAL;
	vfsuid_t		vfsuid;
	vfsgid_t		vfsgid;

	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	/* Reload the incore unlinked list to avoid failure in inodegc. */
	if (xfs_inode_unlinked_incomplete(ip)) {
		error = xfs_inode_reload_unlinked_bucket(tp, ip);
		if (error) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			xfs_irele(ip);
			return error;
		}
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);
	vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid = i_gid_into_vfsgid(idmap, inode);

	/*
	 * If caller wants files from the metadata directories, push out the
	 * bare minimum information for enabling scrub.
	 */
	if (want_metadir_file(ip, bc->breq)) {
		memset(buf, 0, sizeof(*buf));
		buf->bs_ino = ino;
		buf->bs_gen = inode->i_generation;
		buf->bs_mode = inode->i_mode & S_IFMT;
		xfs_bulkstat_health(ip, buf);
		buf->bs_version = XFS_BULKSTAT_VERSION_V5;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		xfs_irele(ip);

		error = bc->formatter(bc->breq, buf);
		if (!error || error == -ECANCELED)
			goto out_advance;
		goto out;
	}

	/* If this is a private inode, don't leak its details to userspace. */
	if (IS_PRIVATE(inode) || xfs_is_sb_inum(mp, ino)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		xfs_irele(ip);
		error = -EINVAL;
		goto out_advance;
	}

	/*
	 * xfs_iget returns the following without needing further change.
	 */
	buf->bs_projectid = ip->i_projid;
	buf->bs_ino = ino;
	buf->bs_uid = from_kuid(sb_userns, vfsuid_into_kuid(vfsuid));
	buf->bs_gid = from_kgid(sb_userns, vfsgid_into_kgid(vfsgid));
	buf->bs_size = ip->i_disk_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime = inode_get_atime_sec(inode);
	buf->bs_atime_nsec = inode_get_atime_nsec(inode);
	buf->bs_mtime = inode_get_mtime_sec(inode);
	buf->bs_mtime_nsec = inode_get_mtime_nsec(inode);
	buf->bs_ctime = inode_get_ctime_sec(inode);
	buf->bs_ctime_nsec = inode_get_ctime_nsec(inode);
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize_blks = ip->i_extsize;

	nextents = xfs_ifork_nextents(&ip->i_df);
	if (!(bc->breq->flags & XFS_IBULK_NREXT64))
		buf->bs_extents = min(nextents, XFS_MAX_EXTCNT_DATA_FORK_SMALL);
	else
		buf->bs_extents64 = nextents;

	xfs_bulkstat_health(ip, buf);
	buf->bs_aextents = xfs_ifork_nextents(&ip->i_af);
	buf->bs_forkoff = xfs_inode_fork_boff(ip);
	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

	if (xfs_has_v3inodes(mp)) {
		buf->bs_btime = ip->i_crtime.tv_sec;
		buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize_blks = ip->i_cowextsize;
	}

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = bc->formatter(bc->breq, buf);
	if (error == -ECANCELED)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}
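
/*
 * Worked example of the cursor convention above (inode numbers assumed):
 * if userspace asks for startino 128 and inode 128 is freed between the
 * inobt walk and xfs_iget, the -ENOENT takes the out_advance path, so
 * breq->startino becomes 129 and the walk continues without reporting
 * anything for 128.  Only "real" errors leave the cursor pointing at the
 * bad inode for the caller to see.
 */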

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error;

	if (breq->idmap != &nop_mnt_idmap) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}

	ASSERT(breq->icount == 1);

	bc.buf = kzalloc(sizeof(struct xfs_bulkstat),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_bulkstat_one_int(breq->mp, breq->idmap, tp,
			breq->startino, &bc);
	xfs_trans_cancel(tp);
out:
	kfree(bc.buf);

	/*
	 * If we reported one inode to userspace then we abort because we hit
	 * the end of the buffer.  Don't leak that back to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}

static int
xfs_bulkstat_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_bstat_chunk	*bc = data;
	int			error;

	error = xfs_bulkstat_one_int(mp, bc->breq->idmap, tp, ino, data);
	/* bulkstat just skips over missing inodes */
	if (error == -ENOENT || error == -EINVAL)
		return 0;
	return error;
}

/*
 * Check the incoming lastino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us to
 * the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
	struct xfs_mount	*mp,
	xfs_ino_t		startino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);

	return agno >= mp->m_sb.sb_agcount ||
	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
}
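
/*
 * Worked example (geometry assumed): on a filesystem with sb_agcount = 4,
 * a startino that decodes to agno 4 or higher lies past the last AG, so
 * xfs_bulkstat_already_done() returns true and the callers below bail out
 * immediately with ocount == 0 instead of walking anything.
 */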

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	unsigned int		iwalk_flags = 0;
	int			error;

	if (breq->idmap != &nop_mnt_idmap) {
		xfs_warn_ratelimited(breq->mp,
			"bulkstat not supported inside of idmapped mounts.");
		return -EINVAL;
	}
	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	bc.buf = kzalloc(sizeof(struct xfs_bulkstat),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	if (breq->flags & XFS_IBULK_SAME_AG)
		iwalk_flags |= XFS_IWALK_SAME_AG;

	error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,
			xfs_bulkstat_iwalk, breq->icount, &bc);
	xfs_trans_cancel(tp);
out:
	kfree(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}
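
/*
 * Illustrative userspace sketch, not part of this file: driving the v5
 * bulkstat path above through the XFS_IOC_BULKSTAT ioctl declared in
 * xfs_fs.h (installed by xfsprogs as <xfs/xfs.h>).  Error handling is
 * minimal, and treat the xfs_bulk_ireq header fields (ino/icount/ocount)
 * as assumptions based on that header.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static void
example_dump_inodes(const char *mntpath)
{
	struct xfs_bulkstat_req	*req;
	size_t			sz;
	int			fd;

	fd = open(mntpath, O_RDONLY);
	if (fd < 0)
		return;

	/* Room for the request header plus a 64-entry batch. */
	sz = sizeof(*req) + 64 * sizeof(struct xfs_bulkstat);
	req = calloc(1, sz);
	if (!req) {
		close(fd);
		return;
	}
	req->hdr.ino = 0;		/* start at the lowest inode */
	req->hdr.icount = 64;		/* batch size */

	/* The kernel advances hdr.ino (the lastino cursor) for us. */
	while (ioctl(fd, XFS_IOC_BULKSTAT, req) == 0 && req->hdr.ocount > 0) {
		for (unsigned int i = 0; i < req->hdr.ocount; i++)
			printf("ino %llu size %llu\n",
			       (unsigned long long)req->bulkstat[i].bs_ino,
			       (unsigned long long)req->bulkstat[i].bs_size);
	}

	free(req);
	close(fd);
}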

/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
	struct xfs_mount		*mp,
	struct xfs_bstat		*bs1,
	const struct xfs_bulkstat	*bstat)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(bs1, 0, sizeof(struct xfs_bstat));
	bs1->bs_ino = bstat->bs_ino;
	bs1->bs_mode = bstat->bs_mode;
	bs1->bs_nlink = bstat->bs_nlink;
	bs1->bs_uid = bstat->bs_uid;
	bs1->bs_gid = bstat->bs_gid;
	bs1->bs_rdev = bstat->bs_rdev;
	bs1->bs_blksize = bstat->bs_blksize;
	bs1->bs_size = bstat->bs_size;
	bs1->bs_atime.tv_sec = bstat->bs_atime;
	bs1->bs_mtime.tv_sec = bstat->bs_mtime;
	bs1->bs_ctime.tv_sec = bstat->bs_ctime;
	bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
	bs1->bs_blocks = bstat->bs_blocks;
	bs1->bs_xflags = bstat->bs_xflags;
	bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
	bs1->bs_extents = bstat->bs_extents;
	bs1->bs_gen = bstat->bs_gen;
	bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bstat->bs_forkoff;
	bs1->bs_projid_hi = bstat->bs_projectid >> 16;
	bs1->bs_sick = bstat->bs_sick;
	bs1->bs_checked = bstat->bs_checked;
	bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bstat->bs_aextents;
}
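
/*
 * Worked example of the project ID split above (value assumed): a 32-bit
 * bs_projectid of 0x00012345 becomes bs_projid_hi = 0x0001 and
 * bs_projid_lo = 0x2345; v1 consumers reassemble it as (hi << 16) | lo.
 */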

struct xfs_inumbers_chunk {
	inumbers_fmt_pf		formatter;
	struct xfs_ibulk	*breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */

/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, lastino is the inode cursor as we walk
 * through the filesystem, so we move it forward unless there was a runtime
 * error.  If the formatter tells us the buffer is now full, we also move the
 * cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	const struct xfs_inobt_rec_incore *irec,
	void			*data)
{
	struct xfs_inumbers	inogrp = {
		.xi_startino	= XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
		.xi_alloccount	= irec->ir_count - irec->ir_freecount,
		.xi_allocmask	= ~irec->ir_free,
		.xi_version	= XFS_INUMBERS_VERSION_V5,
	};
	struct xfs_inumbers_chunk *ic = data;
	int			error;

	error = ic->formatter(ic->breq, &inogrp);
	if (error && error != -ECANCELED)
		return error;

	ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
			XFS_INODES_PER_CHUNK;
	return error;
}

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_inumbers_chunk ic = {
		.formatter	= formatter,
		.breq		= breq,
	};
	struct xfs_trans	*tp;
	int			error = 0;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	/*
	 * Grab an empty transaction so that we can use its recursive buffer
	 * locking abilities to detect cycles in the inobt without deadlocking.
	 */
	error = xfs_trans_alloc_empty(breq->mp, &tp);
	if (error)
		goto out;

	error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
			xfs_inumbers_walk, breq->icount, &ic);
	xfs_trans_cancel(tp);
out:

	/*
	 * We found some inode groups, so clear the error status and return
	 * them.  The lastino pointer will point directly at the inode that
	 * triggered any error that occurred, so on the next call the error
	 * will be triggered again and propagated to userspace as there will be
	 * no formatted inode groups in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}
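
/*
 * Illustrative userspace sketch, not part of this file: reading the inode
 * btree records exported above via the XFS_IOC_INUMBERS ioctl from
 * xfs_fs.h.  The xfs_inumbers_req layout is an assumption based on that
 * header; each record mirrors what xfs_inumbers_walk() fills in, so
 * popcount(xi_allocmask) should match xi_alloccount.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static void
example_dump_inode_groups(int fd)
{
	struct xfs_inumbers_req	*req;
	size_t			sz;

	/* Request header plus a 16-record batch. */
	sz = sizeof(*req) + 16 * sizeof(struct xfs_inumbers);
	req = calloc(1, sz);
	if (!req)
		return;
	req->hdr.ino = 0;
	req->hdr.icount = 16;

	while (ioctl(fd, XFS_IOC_INUMBERS, req) == 0 && req->hdr.ocount > 0) {
		for (unsigned int i = 0; i < req->hdr.ocount; i++)
			printf("chunk at ino %llu: %u allocated\n",
			       (unsigned long long)req->inumbers[i].xi_startino,
			       req->inumbers[i].xi_alloccount);
	}
	free(req);
}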

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
	struct xfs_inogrp		*ig1,
	const struct xfs_inumbers	*ig)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(ig1, 0, sizeof(struct xfs_inogrp));
	ig1->xi_startino = ig->xi_startino;
	ig1->xi_alloccount = ig->xi_alloccount;
	ig1->xi_allocmask = ig->xi_allocmask;
}
fs/xfs/xfs_itable.c — v3.5.6
 
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"

STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);

	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or positive error */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return XFS_ERROR(ENOMEM);
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno = 0; /* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror; /* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */
	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
					/* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
					/* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
					/* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
					/* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If we ran off the end of the ag either with an
			 * error or the normal way, set end and stop
			 * collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (xfs_inobt_maskn(chunkidx, nicluster)
							& ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster);
				}
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.  There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done: we're either out of filesystem or out of space to put the
	 * data.
	 */
	kmem_free_large(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
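
/*
 * Illustrative userspace sketch, not part of this file: the v1 interface
 * implemented above is reached through XFS_IOC_FSBULKSTAT and struct
 * xfs_fsop_bulkreq (xfs_fs.h).  lastip is the *lastinop cursor seen in
 * xfs_bulkstat(); field names are assumptions based on that header and
 * error handling is elided.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static int
example_v1_bulkstat(int fd)
{
	struct xfs_fsop_bulkreq	bulkreq;
	xfs_bstat_t		buf[64];
	__u64			lastino = 0;
	__s32			ocount = 0;

	bulkreq.lastip = &lastino;	/* in/out inode cursor */
	bulkreq.icount = 64;		/* entries available in buf */
	bulkreq.ubuffer = buf;
	bulkreq.ocount = &ocount;

	/* ocount == 0 means the walk ran off the end of the filesystem. */
	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &bulkreq) == 0 && ocount > 0)
		;	/* consume buf[0..ocount-1] here */
	return 0;
}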

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to return */
	char			__user *buffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* filesystem inode number */
	int			res;	/* result from bs1 */

	/*
	 * Note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_itobp to generate warning
	 * messages about bad magic numbers.  This is ok.  The fact that
	 * the inode isn't actually an inode is handled by the error check
	 * below.  Done this way to make the usual case faster at the
	 * expense of the error case.
	 */

	ino = (xfs_ino_t)*lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
	if (error) {
		/*
		 * The special-case way failed; do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				sizeof(xfs_bstat_t), buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}

int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const xfs_inogrp_t	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;
	xfs_agino_t	agino;
	xfs_agnumber_t	agno;
	int		bcount;
	xfs_inogrp_t	*buffer;
	int		bufidx;
	xfs_btree_cur_t	*cur;
	int		error;
	xfs_inobt_rec_incore_t r;
	int		i;
	xfs_ino_t	ino;
	int		left;
	int		tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}
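
/*
 * Illustrative userspace sketch, not part of this file: the table built
 * above is fetched with XFS_IOC_FSINUMBERS, which reuses struct
 * xfs_fsop_bulkreq with ubuffer pointing at xfs_inogrp_t records
 * (an assumption based on xfs_fs.h).
 */
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static void
example_v1_inumbers(int fd)
{
	struct xfs_fsop_bulkreq	bulkreq;
	xfs_inogrp_t		groups[16];
	__u64			lastino = 0;
	__s32			ocount = 0;

	bulkreq.lastip = &lastino;
	bulkreq.icount = 16;
	bulkreq.ubuffer = groups;
	bulkreq.ocount = &ocount;

	while (ioctl(fd, XFS_IOC_FSINUMBERS, &bulkreq) == 0 && ocount > 0)
		;	/* each group covers one inode chunk (xi_allocmask) */
}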