/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return stat information for one inode.
 * Return 0 if ok, else a negative errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

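	/*
	 * The inode number came from userspace, so look it up UNTRUSTED to
	 * have it verified against the inode btree, and DONTCACHE so a bulk
	 * scan does not leave every inode it touches sitting in the inode
	 * cache.  On success the inode is returned with the ILOCK held
	 * shared.
	 */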
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

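	/*
	 * The CoW extent size hint only exists in v3 inodes (v5 superblock
	 * filesystems), so report it only when the hint flag is set there.
	 */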
	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}

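	/*
	 * How block counts, block size and rdev are reported depends on the
	 * data fork format: DEV format inodes report an encoded device
	 * number, LOCAL (inline) inodes use no data blocks, and EXTENTS or
	 * BTREE inodes report their on-disk blocks plus any delayed
	 * allocation blocks.
	 */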
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or a negative error code */
STATIC int
xfs_bulkstat_one_fmt(
	void __user		*ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	if (ubsize < sizeof(*buffer))
		return -ENOMEM;
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void __user	*buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_bulkstat_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				blks_per_cluster;
	int				inodes_per_cluster;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
	blks_per_cluster = xfs_icluster_size_fsb(mp);
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;

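	/*
	 * Walk the chunk one inode cluster at a time and only issue readahead
	 * for clusters that contain at least one allocated inode; ir_free has
	 * a bit set for every free inode in the chunk.  Plugging the block
	 * layer lets the readahead I/O be submitted as one batch.
	 */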
	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK;
	     i += inodes_per_cluster, agbno += blks_per_cluster) {
		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
					     &xfs_inode_buf_ops);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * Lookup the inode chunk that the given inode lives in and then get the record
 * if we found the chunk.  If the inode was not the last in the chunk and there
 * are allocated inodes left after it, update the pointed-to record as well as
 * return the count of grabbed inodes.
 */
STATIC int
xfs_bulkstat_grab_ichunk(
	struct xfs_btree_cur		*cur,	/* btree cursor */
	xfs_agino_t			agino,	/* starting inode of chunk */
	int				*icount,/* return # of inodes grabbed */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				stat;
	int				error = 0;

	/* Lookup the inode chunk that this inode lives in */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		*icount = 0;
		return error;
	}

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);

	/* Check if the record contains the requested inode */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
		*icount = 0;
		return 0;
	}

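	/*
	 * The caller has already returned the inode at agino, so resume from
	 * the inode after it: idx is the chunk-relative index of that next
	 * inode.
	 */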
	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/*
		 * We found the right chunk and some of the inodes after our
		 * start point are allocated in it.  Grab the chunk record and
		 * mark all the inodes before our start point as free, because
		 * they are of no interest to this call.
		 */
		for (i = 0; i < idx; i++) {
			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
				irec->ir_freecount++;
		}

		irec->ir_free |= xfs_inobt_maskn(0, idx);
		*icount = irec->ir_count - irec->ir_freecount;
	}

	return 0;
}

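/*
 * True if the user buffer still has room for at least one more full stat
 * structure.
 */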
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

struct xfs_bulkstat_agichunk {
	char		__user **ac_ubuffer;/* pointer into user's buffer */
	int		ac_ubleft;	/* bytes left in user's buffer */
	int		ac_ubelem;	/* spaces used in user's buffer */
};

/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
static int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irbp,
	bulkstat_one_pf			formatter,
	size_t				statstruct_size,
	struct xfs_bulkstat_agichunk	*acp,
	xfs_agino_t			*last_agino)
{
	char __user			**ubufp = acp->ac_ubuffer;
	int				chunkidx;
	int				error = 0;
	xfs_agino_t			agino = irbp->ir_startino;

	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
	     chunkidx++, agino++) {
		int		fmterror;
		int		ubused;

		/* inode won't fit in buffer, we are done */
		if (acp->ac_ubleft < statstruct_size)
			break;

		/* Skip if this inode is free */
		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
			continue;

		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);

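		/*
		 * A BULKSTAT_RV_GIVEUP from the formatter or any hard error
		 * ends the walk by marking the user buffer full.  -ENOENT and
		 * -EINVAL only mean this particular inode went away or is not
		 * user visible, so it is simply skipped below.
		 */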
		if (fmterror == BULKSTAT_RV_GIVEUP ||
		    (error && error != -ENOENT && error != -EINVAL)) {
			acp->ac_ubleft = 0;
			ASSERT(error);
			break;
		}

		/* be careful not to leak error if at end of chunk */
		if (fmterror == BULKSTAT_RV_NOTHING || error) {
			error = 0;
			continue;
		}

		*ubufp += ubused;
		acp->ac_ubleft -= ubused;
		acp->ac_ubelem++;
	}

	/*
	 * Post-update *last_agino.  At this point, agino will always point
	 * one inode past the last inode we processed successfully.  Hence we
	 * subtract that inode when setting the *last_agino cursor so that we
	 * return the correct cookie to userspace.  On the next bulkstat call,
	 * the inode under the lastino cookie will be skipped as we have
	 * already processed it here.
	 */
	*last_agino = agino - 1;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are no more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	int			nirbuf;	/* size of irbuf */
	int			ubcount; /* size of user's buffer */
	struct xfs_bulkstat_agichunk ac;
	int			error = 0;

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	agno = XFS_INO_TO_AGNO(mp, *lastinop);
	agino = XFS_INO_TO_AGINO(mp, *lastinop);
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
	ac.ac_ubuffer = &ubuffer;
	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
	ac.ac_ubelem = 0;

	*ubcountp = 0;
	*done = 0;

	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
	if (!irbuf)
		return -ENOMEM;
	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);

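	/*
	 * The walk is done in two phases per AG: first gather inobt records
	 * into irbuf while holding the AGI and btree buffers, then drop those
	 * buffers and format the gathered chunks out to userspace.  That way
	 * we never call iget while holding btree buffer locks.
	 */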
	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	while (agno < mp->m_sb.sb_agcount) {
		struct xfs_inobt_rec_incore *irbp = irbuf;
		struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
		bool end_of_ag = false;
		int icount = 0;
		int stat;

		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
				goto del_cursor;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
			}
			/* Increment to the next record */
			error = xfs_btree_increment(cur, 0, &stat);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
		}
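		/*
		 * A lookup or increment that finds nothing means there are no
		 * more inode chunks in this AG, so move on to the next one.
		 */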
		if (error || stat == 0) {
			end_of_ag = true;
			goto del_cursor;
		}

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore r;

			error = xfs_inobt_get_rec(cur, &r, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < r.ir_count) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_holemask = r.ir_holemask;
				irbp->ir_count = r.ir_count;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += r.ir_count - r.ir_freecount;
			}
			error = xfs_btree_increment(cur, 0, &stat);
			if (error || stat == 0) {
				end_of_ag = true;
				goto del_cursor;
			}
			cond_resched();
		}

		/*
		 * Drop the btree buffers and the agi buffer as we can't hold
		 * any of the locks these represent when calling iget.  If
		 * there is a pending error, then we are done.
		 */
del_cursor:
		xfs_btree_del_cursor(cur, error ?
					  XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		if (error)
			break;
		/*
		 * Now format all the good inodes into the user's buffer.  The
		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
		 * for the next loop iteration.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
		     irbp++) {
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
					formatter, statstruct_size, &ac,
					&agino);
			if (error)
				break;

			cond_resched();
		}

		/*
		 * If we've run out of space or had a formatting error, we
		 * are now done
		 */
		if (ac.ac_ubleft < statstruct_size || error)
			break;

		if (end_of_ag) {
			agno++;
			agino = 0;
		}
	}
	/*
	 * Done: we have either run out of filesystem or out of space to put
	 * the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ac.ac_ubelem;

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (ac.ac_ubelem)
		error = 0;

	/*
	 * If we ran out of filesystem, lastino will point off the end of
	 * the filesystem so the next call will return immediately.
	 */
	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
	if (agno >= mp->m_sb.sb_agcount)
		*done = 1;

	return error;
}

int
xfs_inumbers_fmt(
	void __user		*ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer,  /* buffer to read from */
	long			count,	  /* # of elements to read */
	long			*written) /* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 */
int					/* error status */
xfs_inumbers(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastino, /* last inode returned */
	int			*count,	/* size of buffer/count returned */
	void __user		*ubuffer, /* buffer with inode descriptions */
	inumbers_fmt_pf		formatter)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_buf		*agbp = NULL;
	struct xfs_inogrp	*buffer;
	int			bcount;
	int			left = *count;
	int			bufidx = 0;
	int			error = 0;

	*count = 0;
	if (agno >= mp->m_sb.sb_agcount ||
	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
		return error;

	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
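	/*
	 * Batch up to a page worth of xfs_inogrp records at a time and push
	 * them out through the formatter, so the number of copy_to_user
	 * calls stays small even for large requests.
	 */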
	do {
		struct xfs_inobt_rec_incore	r;
		int				stat;

		if (!agbp) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error)
				break;

			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
						    XFS_BTNUM_INO);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &stat);
			if (error)
				break;
			if (!stat)
				goto next_ag;
		}

		error = xfs_inobt_get_rec(cur, &r, &stat);
		if (error)
			break;
		if (!stat)
			goto next_ag;

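		/*
		 * Advance the cursor cookie to the last inode of this chunk so
		 * that a GE lookup on the next call resumes at the chunk after
		 * this one.
		 */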
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		if (++bufidx == bcount) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (error)
				break;
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (!--left)
			break;

		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
		if (stat)
			continue;

next_ag:
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		cur = NULL;
		xfs_buf_relse(agbp);
		agbp = NULL;
		agino = 0;
		agno++;
	} while (agno < mp->m_sb.sb_agcount);

	if (!error) {
		if (bufidx) {
			long	written;

			error = formatter(ubuffer, buffer, bufidx, &written);
			if (!error)
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}

	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}