1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/completion.h>
15#include <linux/buffer_head.h>
16#include <linux/fs.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/prefetch.h>
19#include <linux/blkdev.h>
20#include <linux/rbtree.h>
21#include <linux/random.h>
22
23#include "gfs2.h"
24#include "incore.h"
25#include "glock.h"
26#include "glops.h"
27#include "lops.h"
28#include "meta_io.h"
29#include "quota.h"
30#include "rgrp.h"
31#include "super.h"
32#include "trans.h"
33#include "util.h"
34#include "log.h"
35#include "inode.h"
36#include "trace_gfs2.h"
37#include "dir.h"
38
39#define BFITNOENT ((u32)~0)
40#define NO_BLOCK ((u64)~0)
41
42#if BITS_PER_LONG == 32
43#define LBITMASK (0x55555555UL)
44#define LBITSKIP55 (0x55555555UL)
45#define LBITSKIP00 (0x00000000UL)
46#else
47#define LBITMASK (0x5555555555555555UL)
48#define LBITSKIP55 (0x5555555555555555UL)
49#define LBITSKIP00 (0x0000000000000000UL)
50#endif
51
52/*
53 * These routines are used by the resource group routines (rgrp.c)
54 * to keep track of block allocation. Each block is represented by two
55 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
56 *
57 * 0 = Free
58 * 1 = Used (not metadata)
59 * 2 = Unlinked (still in use) inode
60 * 3 = Used (metadata)
61 */
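/*
 * For example, a bitmap byte of 0xe4 (binary 11 10 01 00, least significant
 * bit-pair first) describes four blocks:
 *
 *   block 0 -> 00 = free
 *   block 1 -> 01 = used (data)
 *   block 2 -> 10 = unlinked inode
 *   block 3 -> 11 = used (metadata)
 *
 * gfs2_testbit() below recovers these states with
 * bit = (offset % GFS2_NBBY) * GFS2_BIT_SIZE and (*byte >> bit) & GFS2_BIT_MASK.
 */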
62
63struct gfs2_extent {
64 struct gfs2_rbm rbm;
65 u32 len;
66};
67
68static const char valid_change[16] = {
69 /* current */
70 /* n */ 0, 1, 1, 1,
71 /* e */ 1, 0, 0, 0,
72 /* w */ 0, 0, 0, 1,
73 1, 0, 0, 0
74};
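
/*
 * gfs2_setbit() indexes this table as valid_change[new_state * 4 + cur_state],
 * so each row above is a new state (the "n", "e", "w" labels spell it out)
 * and each column a current state.  Row 2, for instance, only permits the
 * transition 3 -> 2: a dinode may become an unlinked inode, but a free or
 * data block may not.
 */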
75
76static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
77 const struct gfs2_inode *ip, bool nowrap);
78
79
80/**
81 * gfs2_setbit - Set a bit in the bitmaps
82 * @rbm: The position of the bit to set
83 * @do_clone: Also set the clone bitmap, if it exists
84 * @new_state: the new state of the block
85 *
86 */
87
88static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
89 unsigned char new_state)
90{
91 unsigned char *byte1, *byte2, *end, cur_state;
92 struct gfs2_bitmap *bi = rbm_bi(rbm);
93 unsigned int buflen = bi->bi_len;
94 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
95
96 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
97 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
98
99 BUG_ON(byte1 >= end);
100
101 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
102
103 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
104 pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
105 rbm->offset, cur_state, new_state);
106 pr_warn("rgrp=0x%llx bi_start=0x%x\n",
107 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
108 pr_warn("bi_offset=0x%x bi_len=0x%x\n",
109 bi->bi_offset, bi->bi_len);
110 dump_stack();
111 gfs2_consist_rgrpd(rbm->rgd);
112 return;
113 }
114 *byte1 ^= (cur_state ^ new_state) << bit;
115
116 if (do_clone && bi->bi_clone) {
117 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
118 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
119 *byte2 ^= (cur_state ^ new_state) << bit;
120 }
121}
122
123/**
124 * gfs2_testbit - test a bit in the bitmaps
125 * @rbm: The bit to test
126 *
127 * Returns: The two bit block state of the requested bit
128 */
129
130static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
131{
132 struct gfs2_bitmap *bi = rbm_bi(rbm);
133 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
134 const u8 *byte;
135 unsigned int bit;
136
137 byte = buffer + (rbm->offset / GFS2_NBBY);
138 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
139
140 return (*byte >> bit) & GFS2_BIT_MASK;
141}
142
/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc.) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */
160
161static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
162{
163 u64 tmp;
164 static const u64 search[] = {
165 [0] = 0xffffffffffffffffULL,
166 [1] = 0xaaaaaaaaaaaaaaaaULL,
167 [2] = 0x5555555555555555ULL,
168 [3] = 0x0000000000000000ULL,
169 };
170 tmp = le64_to_cpu(*ptr) ^ search[state];
171 tmp &= (tmp >> 1);
172 tmp &= mask;
173 return tmp;
174}
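
/*
 * Worked example, considering just one byte of the bitmap: searching 0xe4
 * (blocks in states 3,2,1,0) for state 1 (GFS2_BLKST_USED) uses
 * search[1] = ...10101010:
 *
 *   tmp = 0xe4 ^ 0xaa   = 0x4e
 *   tmp &= tmp >> 1     : 0x4e & 0x27 = 0x06
 *   tmp &= 0x55         = 0x04
 *
 * Only even bit 2 survives, i.e. entry 1, which is the only block in that
 * byte in the requested state.  gfs2_bitfit() below turns the bit position
 * into a block number with __ffs64(tmp) / 2.
 */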
175
176/**
177 * rs_cmp - multi-block reservation range compare
178 * @blk: absolute file system block number of the new reservation
179 * @len: number of blocks in the new reservation
180 * @rs: existing reservation to compare against
181 *
182 * returns: 1 if the block range is beyond the reach of the reservation
183 * -1 if the block range is before the start of the reservation
184 * 0 if the block range overlaps with the reservation
185 */
186static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
187{
188 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
189
190 if (blk >= startblk + rs->rs_free)
191 return 1;
192 if (blk + len - 1 < startblk)
193 return -1;
194 return 0;
195}
196
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */
217
218static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
219 u32 goal, u8 state)
220{
221 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
222 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
223 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
224 u64 tmp;
225 u64 mask = 0x5555555555555555ULL;
226 u32 bit;
227
228 /* Mask off bits we don't care about at the start of the search */
229 mask <<= spoint;
230 tmp = gfs2_bit_search(ptr, mask, state);
231 ptr++;
232 while(tmp == 0 && ptr < end) {
233 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
234 ptr++;
235 }
236 /* Mask off any bits which are more than len bytes from the start */
237 if (ptr == end && (len & (sizeof(u64) - 1)))
238 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
239 /* Didn't find anything, so return */
240 if (tmp == 0)
241 return BFITNOENT;
242 ptr--;
243 bit = __ffs64(tmp);
244 bit /= 2; /* two bits per entry in the bitmap */
245 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
246}
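
/*
 * Example of the index arithmetic above: for goal = 70, ptr starts at
 * buf + (70 >> 5) * 8, i.e. the third u64, which covers blocks 64..95 of
 * the buffer.  spoint = (70 << 1) & 63 = 12, so the initial mask
 * 0x5555... << 12 ignores the bit-pairs of blocks 64..69.  If the first
 * match is then found at bit 24 of that word, the function returns
 * (16 * GFS2_NBBY) + 24 / 2 = 76.
 */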
247
248/**
249 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
250 * @rbm: The rbm with rgd already set correctly
251 * @block: The block number (filesystem relative)
252 *
253 * This sets the bi and offset members of an rbm based on a
254 * resource group and a filesystem relative block number. The
255 * resource group must be set in the rbm on entry, the bi and
256 * offset members will be set by this function.
257 *
258 * Returns: 0 on success, or an error code
259 */
260
261static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
262{
263 u64 rblock = block - rbm->rgd->rd_data0;
264
265 if (WARN_ON_ONCE(rblock > UINT_MAX))
266 return -EINVAL;
267 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
268 return -E2BIG;
269
270 rbm->bii = 0;
271 rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
273 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
274 return 0;
275
276 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
277 rbm->offset += (sizeof(struct gfs2_rgrp) -
278 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
279 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
280 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
281 return 0;
282}
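
/*
 * In other words, with B = sd_blocks_per_bitmap and
 * D = (sizeof(struct gfs2_rgrp) - sizeof(struct gfs2_meta_header)) * GFS2_NBBY,
 * a relative block r that does not fit in the first bitmap ends up at
 *
 *   bii    = (r + D) / B
 *   offset = (r + D) % B
 *
 * The D term compensates for bitmap 0 being smaller than the rest, since it
 * shares its block with the larger struct gfs2_rgrp header.
 */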
283
284/**
285 * gfs2_rbm_incr - increment an rbm structure
286 * @rbm: The rbm with rgd already set correctly
287 *
288 * This function takes an existing rbm structure and increments it to the next
289 * viable block offset.
290 *
291 * Returns: If incrementing the offset would cause the rbm to go past the
292 * end of the rgrp, true is returned, otherwise false.
293 *
294 */
295
296static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
297{
298 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
299 rbm->offset++;
300 return false;
301 }
302 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
303 return true;
304
305 rbm->offset = 0;
306 rbm->bii++;
307 return false;
308}
309
310/**
311 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
312 * @rbm: Position to search (value/result)
313 * @n_unaligned: Number of unaligned blocks to check
314 * @len: Decremented for each block found (terminate on zero)
315 *
316 * Returns: true if a non-free block is encountered
317 */
318
319static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
320{
321 u32 n;
322 u8 res;
323
324 for (n = 0; n < n_unaligned; n++) {
325 res = gfs2_testbit(rbm);
326 if (res != GFS2_BLKST_FREE)
327 return true;
328 (*len)--;
329 if (*len == 0)
330 return true;
331 if (gfs2_rbm_incr(rbm))
332 return true;
333 }
334
335 return false;
336}
337
338/**
339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rrbm: Starting position
341 * @len: Max length to check
342 *
343 * Starting at the block specified by the rbm, see how many free blocks
344 * there are, not reading more than len blocks ahead. This can be done
345 * using memchr_inv when the blocks are byte aligned, but has to be done
346 * on a block by block basis in case of unaligned blocks. Also this
347 * function can cope with bitmap boundaries (although it must stop on
348 * a resource group boundary)
349 *
350 * Returns: Number of free blocks in the extent
351 */
352
353static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
354{
355 struct gfs2_rbm rbm = *rrbm;
356 u32 n_unaligned = rbm.offset & 3;
357 u32 size = len;
358 u32 bytes;
359 u32 chunk_size;
360 u8 *ptr, *start, *end;
361 u64 block;
362 struct gfs2_bitmap *bi;
363
364 if (n_unaligned &&
365 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
366 goto out;
367
368 n_unaligned = len & 3;
369 /* Start is now byte aligned */
370 while (len > 3) {
371 bi = rbm_bi(&rbm);
372 start = bi->bi_bh->b_data;
373 if (bi->bi_clone)
374 start = bi->bi_clone;
375 end = start + bi->bi_bh->b_size;
376 start += bi->bi_offset;
377 BUG_ON(rbm.offset & 3);
378 start += (rbm.offset / GFS2_NBBY);
379 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
380 ptr = memchr_inv(start, 0, bytes);
381 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
382 chunk_size *= GFS2_NBBY;
383 BUG_ON(len < chunk_size);
384 len -= chunk_size;
385 block = gfs2_rbm_to_block(&rbm);
386 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
387 n_unaligned = 0;
388 break;
389 }
390 if (ptr) {
391 n_unaligned = 3;
392 break;
393 }
394 n_unaligned = len & 3;
395 }
396
397 /* Deal with any bits left over at the end */
398 if (n_unaligned)
399 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
400out:
401 return size - len;
402}
403
404/**
405 * gfs2_bitcount - count the number of bits in a certain state
406 * @rgd: the resource group descriptor
407 * @buffer: the buffer that holds the bitmaps
408 * @buflen: the length (in bytes) of the buffer
409 * @state: the state of the block we're looking for
410 *
411 * Returns: The number of bits
412 */
413
414static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
415 unsigned int buflen, u8 state)
416{
417 const u8 *byte = buffer;
418 const u8 *end = buffer + buflen;
419 const u8 state1 = state << 2;
420 const u8 state2 = state << 4;
421 const u8 state3 = state << 6;
422 u32 count = 0;
423
424 for (; byte < end; byte++) {
425 if (((*byte) & 0x03) == state)
426 count++;
427 if (((*byte) & 0x0C) == state1)
428 count++;
429 if (((*byte) & 0x30) == state2)
430 count++;
431 if (((*byte) & 0xC0) == state3)
432 count++;
433 }
434
435 return count;
436}
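
/*
 * Each byte is checked in place against all four of its bit-pairs: for
 * state 1, the tests above compare against 0x01, 0x04, 0x10 and 0x40
 * respectively.  A byte of 0xe4 (states 3,2,1,0) would therefore
 * contribute exactly one match, from the (*byte & 0x0C) == state1 test.
 */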
437
438/**
439 * gfs2_rgrp_verify - Verify that a resource group is consistent
440 * @rgd: the rgrp
441 *
442 */
443
444void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
445{
446 struct gfs2_sbd *sdp = rgd->rd_sbd;
447 struct gfs2_bitmap *bi = NULL;
448 u32 length = rgd->rd_length;
449 u32 count[4], tmp;
450 int buf, x;
451
452 memset(count, 0, 4 * sizeof(u32));
453
454 /* Count # blocks in each of 4 possible allocation states */
455 for (buf = 0; buf < length; buf++) {
456 bi = rgd->rd_bits + buf;
457 for (x = 0; x < 4; x++)
458 count[x] += gfs2_bitcount(rgd,
459 bi->bi_bh->b_data +
460 bi->bi_offset,
461 bi->bi_len, x);
462 }
463
464 if (count[0] != rgd->rd_free) {
465 if (gfs2_consist_rgrpd(rgd))
466 fs_err(sdp, "free data mismatch: %u != %u\n",
467 count[0], rgd->rd_free);
468 return;
469 }
470
471 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
472 if (count[1] != tmp) {
473 if (gfs2_consist_rgrpd(rgd))
474 fs_err(sdp, "used data mismatch: %u != %u\n",
475 count[1], tmp);
476 return;
477 }
478
479 if (count[2] + count[3] != rgd->rd_dinodes) {
480 if (gfs2_consist_rgrpd(rgd))
481 fs_err(sdp, "used metadata mismatch: %u != %u\n",
482 count[2] + count[3], rgd->rd_dinodes);
483 return;
484 }
485}
486
487/**
488 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
489 * @sdp: The GFS2 superblock
490 * @blk: The data block number
491 * @exact: True if this needs to be an exact match
492 *
493 * The @exact argument should be set to true by most callers. The exception
494 * is when we need to match blocks which are not represented by the rgrp
495 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
496 * there for alignment purposes. Another way of looking at it is that @exact
497 * matches only valid data/metadata blocks, but with @exact false, it will
498 * match any block within the extent of the rgrp.
499 *
500 * Returns: The resource group, or NULL if not found
501 */
502
503struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
504{
505 struct rb_node *n, *next;
506 struct gfs2_rgrpd *cur;
507
508 spin_lock(&sdp->sd_rindex_spin);
509 n = sdp->sd_rindex_tree.rb_node;
510 while (n) {
511 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
512 next = NULL;
513 if (blk < cur->rd_addr)
514 next = n->rb_left;
515 else if (blk >= cur->rd_data0 + cur->rd_data)
516 next = n->rb_right;
517 if (next == NULL) {
518 spin_unlock(&sdp->sd_rindex_spin);
519 if (exact) {
520 if (blk < cur->rd_addr)
521 return NULL;
522 if (blk >= cur->rd_data0 + cur->rd_data)
523 return NULL;
524 }
525 return cur;
526 }
527 n = next;
528 }
529 spin_unlock(&sdp->sd_rindex_spin);
530
531 return NULL;
532}
533
534/**
535 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
536 * @sdp: The GFS2 superblock
537 *
538 * Returns: The first rgrp in the filesystem
539 */
540
541struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
542{
543 const struct rb_node *n;
544 struct gfs2_rgrpd *rgd;
545
546 spin_lock(&sdp->sd_rindex_spin);
547 n = rb_first(&sdp->sd_rindex_tree);
548 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
549 spin_unlock(&sdp->sd_rindex_spin);
550
551 return rgd;
552}
553
554/**
555 * gfs2_rgrpd_get_next - get the next RG
556 * @rgd: the resource group descriptor
557 *
558 * Returns: The next rgrp
559 */
560
561struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
562{
563 struct gfs2_sbd *sdp = rgd->rd_sbd;
564 const struct rb_node *n;
565
566 spin_lock(&sdp->sd_rindex_spin);
567 n = rb_next(&rgd->rd_node);
568 if (n == NULL)
569 n = rb_first(&sdp->sd_rindex_tree);
570
571 if (unlikely(&rgd->rd_node == n)) {
572 spin_unlock(&sdp->sd_rindex_spin);
573 return NULL;
574 }
575 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
576 spin_unlock(&sdp->sd_rindex_spin);
577 return rgd;
578}
579
580void check_and_update_goal(struct gfs2_inode *ip)
581{
582 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
583 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
584 ip->i_goal = ip->i_no_addr;
585}
586
587void gfs2_free_clones(struct gfs2_rgrpd *rgd)
588{
589 int x;
590
591 for (x = 0; x < rgd->rd_length; x++) {
592 struct gfs2_bitmap *bi = rgd->rd_bits + x;
593 kfree(bi->bi_clone);
594 bi->bi_clone = NULL;
595 }
596}
597
598/**
599 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                  plus a quota allocation data structure, if necessary
601 * @ip: the inode for this reservation
602 */
603int gfs2_rsqa_alloc(struct gfs2_inode *ip)
604{
605 return gfs2_qa_alloc(ip);
606}
607
608static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
609{
610 gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
611 (unsigned long long)rs->rs_inum,
612 (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
613 rs->rs_rbm.offset, rs->rs_free);
614}
615
616/**
617 * __rs_deltree - remove a multi-block reservation from the rgd tree
618 * @rs: The reservation to remove
619 *
620 */
621static void __rs_deltree(struct gfs2_blkreserv *rs)
622{
623 struct gfs2_rgrpd *rgd;
624
625 if (!gfs2_rs_active(rs))
626 return;
627
628 rgd = rs->rs_rbm.rgd;
629 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
630 rb_erase(&rs->rs_node, &rgd->rd_rstree);
631 RB_CLEAR_NODE(&rs->rs_node);
632
633 if (rs->rs_free) {
634 struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
635
636 /* return reserved blocks to the rgrp */
637 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
638 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
639 /* The rgrp extent failure point is likely not to increase;
640 it will only do so if the freed blocks are somehow
641 contiguous with a span of free blocks that follows. Still,
642 it will force the number to be recalculated later. */
643 rgd->rd_extfail_pt += rs->rs_free;
644 rs->rs_free = 0;
645 clear_bit(GBF_FULL, &bi->bi_flags);
646 }
647}
648
649/**
650 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
651 * @rs: The reservation to remove
652 *
653 */
654void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
655{
656 struct gfs2_rgrpd *rgd;
657
658 rgd = rs->rs_rbm.rgd;
659 if (rgd) {
660 spin_lock(&rgd->rd_rsspin);
661 __rs_deltree(rs);
662 BUG_ON(rs->rs_free);
663 spin_unlock(&rgd->rd_rsspin);
664 }
665}
666
667/**
668 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
669 * @ip: The inode for this reservation
670 * @wcount: The inode's write count, or NULL
671 *
672 */
673void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
674{
675 down_write(&ip->i_rw_mutex);
676 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
677 gfs2_rs_deltree(&ip->i_res);
678 up_write(&ip->i_rw_mutex);
679 gfs2_qa_delete(ip, wcount);
680}
681
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
690static void return_all_reservations(struct gfs2_rgrpd *rgd)
691{
692 struct rb_node *n;
693 struct gfs2_blkreserv *rs;
694
695 spin_lock(&rgd->rd_rsspin);
696 while ((n = rb_first(&rgd->rd_rstree))) {
697 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
698 __rs_deltree(rs);
699 }
700 spin_unlock(&rgd->rd_rsspin);
701}
702
703void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
704{
705 struct rb_node *n;
706 struct gfs2_rgrpd *rgd;
707 struct gfs2_glock *gl;
708
709 while ((n = rb_first(&sdp->sd_rindex_tree))) {
710 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
711 gl = rgd->rd_gl;
712
713 rb_erase(n, &sdp->sd_rindex_tree);
714
715 if (gl) {
716 glock_clear_object(gl, rgd);
717 gfs2_glock_put(gl);
718 }
719
720 gfs2_free_clones(rgd);
721 kfree(rgd->rd_bits);
722 rgd->rd_bits = NULL;
723 return_all_reservations(rgd);
724 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
725 }
726}
727
728static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
729{
730 pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
731 pr_info("ri_length = %u\n", rgd->rd_length);
732 pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
733 pr_info("ri_data = %u\n", rgd->rd_data);
734 pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
735}
736
737/**
 * compute_bitstructs - Compute the bitmap sizes
739 * @rgd: The resource group descriptor
740 *
741 * Calculates bitmap descriptors, one for each block that contains bitmap data
742 *
743 * Returns: errno
744 */
745
746static int compute_bitstructs(struct gfs2_rgrpd *rgd)
747{
748 struct gfs2_sbd *sdp = rgd->rd_sbd;
749 struct gfs2_bitmap *bi;
750 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
751 u32 bytes_left, bytes;
752 int x;
753
754 if (!length)
755 return -EINVAL;
756
757 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
758 if (!rgd->rd_bits)
759 return -ENOMEM;
760
761 bytes_left = rgd->rd_bitbytes;
762
763 for (x = 0; x < length; x++) {
764 bi = rgd->rd_bits + x;
765
766 bi->bi_flags = 0;
767 /* small rgrp; bitmap stored completely in header block */
768 if (length == 1) {
769 bytes = bytes_left;
770 bi->bi_offset = sizeof(struct gfs2_rgrp);
771 bi->bi_start = 0;
772 bi->bi_len = bytes;
773 bi->bi_blocks = bytes * GFS2_NBBY;
774 /* header block */
775 } else if (x == 0) {
776 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
777 bi->bi_offset = sizeof(struct gfs2_rgrp);
778 bi->bi_start = 0;
779 bi->bi_len = bytes;
780 bi->bi_blocks = bytes * GFS2_NBBY;
781 /* last block */
782 } else if (x + 1 == length) {
783 bytes = bytes_left;
784 bi->bi_offset = sizeof(struct gfs2_meta_header);
785 bi->bi_start = rgd->rd_bitbytes - bytes_left;
786 bi->bi_len = bytes;
787 bi->bi_blocks = bytes * GFS2_NBBY;
788 /* other blocks */
789 } else {
790 bytes = sdp->sd_sb.sb_bsize -
791 sizeof(struct gfs2_meta_header);
792 bi->bi_offset = sizeof(struct gfs2_meta_header);
793 bi->bi_start = rgd->rd_bitbytes - bytes_left;
794 bi->bi_len = bytes;
795 bi->bi_blocks = bytes * GFS2_NBBY;
796 }
797
798 bytes_left -= bytes;
799 }
800
801 if (bytes_left) {
802 gfs2_consist_rgrpd(rgd);
803 return -EIO;
804 }
805 bi = rgd->rd_bits + (length - 1);
806 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
807 if (gfs2_consist_rgrpd(rgd)) {
808 gfs2_rindex_print(rgd);
809 fs_err(sdp, "start=%u len=%u offset=%u\n",
810 bi->bi_start, bi->bi_len, bi->bi_offset);
811 }
812 return -EIO;
813 }
814
815 return 0;
816}
817
818/**
819 * gfs2_ri_total - Total up the file system space, according to the rindex.
820 * @sdp: the filesystem
821 *
822 */
823u64 gfs2_ri_total(struct gfs2_sbd *sdp)
824{
825 u64 total_data = 0;
826 struct inode *inode = sdp->sd_rindex;
827 struct gfs2_inode *ip = GFS2_I(inode);
828 char buf[sizeof(struct gfs2_rindex)];
829 int error, rgrps;
830
831 for (rgrps = 0;; rgrps++) {
832 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
833
834 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
835 break;
836 error = gfs2_internal_read(ip, buf, &pos,
837 sizeof(struct gfs2_rindex));
838 if (error != sizeof(struct gfs2_rindex))
839 break;
840 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
841 }
842 return total_data;
843}
844
845static int rgd_insert(struct gfs2_rgrpd *rgd)
846{
847 struct gfs2_sbd *sdp = rgd->rd_sbd;
848 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
849
850 /* Figure out where to put new node */
851 while (*newn) {
852 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
853 rd_node);
854
855 parent = *newn;
856 if (rgd->rd_addr < cur->rd_addr)
857 newn = &((*newn)->rb_left);
858 else if (rgd->rd_addr > cur->rd_addr)
859 newn = &((*newn)->rb_right);
860 else
861 return -EEXIST;
862 }
863
864 rb_link_node(&rgd->rd_node, parent, newn);
865 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
866 sdp->sd_rgrps++;
867 return 0;
868}
869
870/**
871 * read_rindex_entry - Pull in a new resource index entry from the disk
872 * @ip: Pointer to the rindex inode
873 *
874 * Returns: 0 on success, > 0 on EOF, error code otherwise
875 */
876
877static int read_rindex_entry(struct gfs2_inode *ip)
878{
879 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
880 const unsigned bsize = sdp->sd_sb.sb_bsize;
881 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
882 struct gfs2_rindex buf;
883 int error;
884 struct gfs2_rgrpd *rgd;
885
886 if (pos >= i_size_read(&ip->i_inode))
887 return 1;
888
889 error = gfs2_internal_read(ip, (char *)&buf, &pos,
890 sizeof(struct gfs2_rindex));
891
892 if (error != sizeof(struct gfs2_rindex))
893 return (error == 0) ? 1 : error;
894
895 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
896 error = -ENOMEM;
897 if (!rgd)
898 return error;
899
900 rgd->rd_sbd = sdp;
901 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
902 rgd->rd_length = be32_to_cpu(buf.ri_length);
903 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
904 rgd->rd_data = be32_to_cpu(buf.ri_data);
905 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
906 spin_lock_init(&rgd->rd_rsspin);
907
908 error = compute_bitstructs(rgd);
909 if (error)
910 goto fail;
911
912 error = gfs2_glock_get(sdp, rgd->rd_addr,
913 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
914 if (error)
915 goto fail;
916
917 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
918 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
919 if (rgd->rd_data > sdp->sd_max_rg_data)
920 sdp->sd_max_rg_data = rgd->rd_data;
921 spin_lock(&sdp->sd_rindex_spin);
922 error = rgd_insert(rgd);
923 spin_unlock(&sdp->sd_rindex_spin);
924 if (!error) {
925 glock_set_object(rgd->rd_gl, rgd);
926 rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
927 rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
928 rgd->rd_length) * bsize) - 1;
929 return 0;
930 }
931
932 error = 0; /* someone else read in the rgrp; free it and ignore it */
933 gfs2_glock_put(rgd->rd_gl);
934
935fail:
936 kfree(rgd->rd_bits);
937 rgd->rd_bits = NULL;
938 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
939 return error;
940}
941
942/**
 * set_rgrp_preferences - Run through all the rgrps, selecting some we prefer to use
944 * @sdp: the GFS2 superblock
945 *
946 * The purpose of this function is to select a subset of the resource groups
947 * and mark them as PREFERRED. We do it in such a way that each node prefers
948 * to use a unique set of rgrps to minimize glock contention.
949 */
950static void set_rgrp_preferences(struct gfs2_sbd *sdp)
951{
952 struct gfs2_rgrpd *rgd, *first;
953 int i;
954
955 /* Skip an initial number of rgrps, based on this node's journal ID.
956 That should start each node out on its own set. */
957 rgd = gfs2_rgrpd_get_first(sdp);
958 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
959 rgd = gfs2_rgrpd_get_next(rgd);
960 first = rgd;
961
962 do {
963 rgd->rd_flags |= GFS2_RDF_PREFERRED;
964 for (i = 0; i < sdp->sd_journals; i++) {
965 rgd = gfs2_rgrpd_get_next(rgd);
966 if (!rgd || rgd == first)
967 break;
968 }
969 } while (rgd && rgd != first);
970}
971
972/**
973 * gfs2_ri_update - Pull in a new resource index from the disk
974 * @ip: pointer to the rindex inode
975 *
976 * Returns: 0 on successful update, error code otherwise
977 */
978
979static int gfs2_ri_update(struct gfs2_inode *ip)
980{
981 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
982 int error;
983
984 do {
985 error = read_rindex_entry(ip);
986 } while (error == 0);
987
988 if (error < 0)
989 return error;
990
991 set_rgrp_preferences(sdp);
992
993 sdp->sd_rindex_uptodate = 1;
994 return 0;
995}
996
997/**
998 * gfs2_rindex_update - Update the rindex if required
999 * @sdp: The GFS2 superblock
1000 *
1001 * We grab a lock on the rindex inode to make sure that it doesn't
1002 * change whilst we are performing an operation. We keep this lock
1003 * for quite long periods of time compared to other locks. This
1004 * doesn't matter, since it is shared and it is very, very rarely
1005 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1006 *
1007 * This makes sure that we're using the latest copy of the resource index
1008 * special file, which might have been updated if someone expanded the
1009 * filesystem (via gfs2_grow utility), which adds new resource groups.
1010 *
 * Returns: 0 on success, error code otherwise
1012 */
1013
1014int gfs2_rindex_update(struct gfs2_sbd *sdp)
1015{
1016 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1017 struct gfs2_glock *gl = ip->i_gl;
1018 struct gfs2_holder ri_gh;
1019 int error = 0;
1020 int unlock_required = 0;
1021
1022 /* Read new copy from disk if we don't have the latest */
1023 if (!sdp->sd_rindex_uptodate) {
1024 if (!gfs2_glock_is_locked_by_me(gl)) {
1025 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1026 if (error)
1027 return error;
1028 unlock_required = 1;
1029 }
1030 if (!sdp->sd_rindex_uptodate)
1031 error = gfs2_ri_update(ip);
1032 if (unlock_required)
1033 gfs2_glock_dq_uninit(&ri_gh);
1034 }
1035
1036 return error;
1037}
1038
1039static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1040{
1041 const struct gfs2_rgrp *str = buf;
1042 u32 rg_flags;
1043
1044 rg_flags = be32_to_cpu(str->rg_flags);
1045 rg_flags &= ~GFS2_RDF_MASK;
1046 rgd->rd_flags &= GFS2_RDF_MASK;
1047 rgd->rd_flags |= rg_flags;
1048 rgd->rd_free = be32_to_cpu(str->rg_free);
1049 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1050 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1051 /* rd_data0, rd_data and rd_bitbytes already set from rindex */
1052}
1053
1054static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1055{
1056 struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
1057 struct gfs2_rgrp *str = buf;
1058 u32 crc;
1059
1060 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1061 str->rg_free = cpu_to_be32(rgd->rd_free);
1062 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1063 if (next == NULL)
1064 str->rg_skip = 0;
1065 else if (next->rd_addr > rgd->rd_addr)
1066 str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
1067 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1068 str->rg_data0 = cpu_to_be64(rgd->rd_data0);
1069 str->rg_data = cpu_to_be32(rgd->rd_data);
1070 str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
1071 str->rg_crc = 0;
1072 crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
1073 str->rg_crc = cpu_to_be32(crc);
1074
1075 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1076}
1077
1078static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1079{
1080 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1081 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1082
1083 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1084 rgl->rl_dinodes != str->rg_dinodes ||
1085 rgl->rl_igeneration != str->rg_igeneration)
1086 return 0;
1087 return 1;
1088}
1089
1090static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1091{
1092 const struct gfs2_rgrp *str = buf;
1093
1094 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1095 rgl->rl_flags = str->rg_flags;
1096 rgl->rl_free = str->rg_free;
1097 rgl->rl_dinodes = str->rg_dinodes;
1098 rgl->rl_igeneration = str->rg_igeneration;
1099 rgl->__pad = 0UL;
1100}
1101
1102static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1103{
1104 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1105 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1106 rgl->rl_unlinked = cpu_to_be32(unlinked);
1107}
1108
1109static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1110{
1111 struct gfs2_bitmap *bi;
1112 const u32 length = rgd->rd_length;
1113 const u8 *buffer = NULL;
1114 u32 i, goal, count = 0;
1115
1116 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1117 goal = 0;
1118 buffer = bi->bi_bh->b_data + bi->bi_offset;
1119 WARN_ON(!buffer_uptodate(bi->bi_bh));
1120 while (goal < bi->bi_len * GFS2_NBBY) {
1121 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1122 GFS2_BLKST_UNLINKED);
1123 if (goal == BFITNOENT)
1124 break;
1125 count++;
1126 goal++;
1127 }
1128 }
1129
1130 return count;
1131}
1132
1133
1134/**
1135 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1136 * @rgd: the struct gfs2_rgrpd describing the RG to read in
1137 *
1138 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1140 *
1141 * Returns: errno
1142 */
1143
1144static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1145{
1146 struct gfs2_sbd *sdp = rgd->rd_sbd;
1147 struct gfs2_glock *gl = rgd->rd_gl;
1148 unsigned int length = rgd->rd_length;
1149 struct gfs2_bitmap *bi;
1150 unsigned int x, y;
1151 int error;
1152
1153 if (rgd->rd_bits[0].bi_bh != NULL)
1154 return 0;
1155
1156 for (x = 0; x < length; x++) {
1157 bi = rgd->rd_bits + x;
1158 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1159 if (error)
1160 goto fail;
1161 }
1162
1163 for (y = length; y--;) {
1164 bi = rgd->rd_bits + y;
1165 error = gfs2_meta_wait(sdp, bi->bi_bh);
1166 if (error)
1167 goto fail;
1168 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1169 GFS2_METATYPE_RG)) {
1170 error = -EIO;
1171 goto fail;
1172 }
1173 }
1174
1175 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1176 for (x = 0; x < length; x++)
1177 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1178 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1179 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1180 rgd->rd_free_clone = rgd->rd_free;
1181 /* max out the rgrp allocation failure point */
1182 rgd->rd_extfail_pt = rgd->rd_free;
1183 }
1184 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1185 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1186 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1187 rgd->rd_bits[0].bi_bh->b_data);
1188 }
1189 else if (sdp->sd_args.ar_rgrplvb) {
1190 if (!gfs2_rgrp_lvb_valid(rgd)){
1191 gfs2_consist_rgrpd(rgd);
1192 error = -EIO;
1193 goto fail;
1194 }
1195 if (rgd->rd_rgl->rl_unlinked == 0)
1196 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1197 }
1198 return 0;
1199
1200fail:
1201 while (x--) {
1202 bi = rgd->rd_bits + x;
1203 brelse(bi->bi_bh);
1204 bi->bi_bh = NULL;
1205 gfs2_assert_warn(sdp, !bi->bi_clone);
1206 }
1207
1208 return error;
1209}
1210
1211static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1212{
1213 u32 rl_flags;
1214
1215 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1216 return 0;
1217
1218 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1219 return gfs2_rgrp_bh_get(rgd);
1220
1221 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1222 rl_flags &= ~GFS2_RDF_MASK;
1223 rgd->rd_flags &= GFS2_RDF_MASK;
1224 rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1225 if (rgd->rd_rgl->rl_unlinked == 0)
1226 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1227 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1228 rgd->rd_free_clone = rgd->rd_free;
1229 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1230 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1231 return 0;
1232}
1233
1234int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1235{
1236 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1237 struct gfs2_sbd *sdp = rgd->rd_sbd;
1238
1239 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1240 return 0;
1241 return gfs2_rgrp_bh_get(rgd);
1242}
1243
1244/**
1245 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1246 * @rgd: The resource group
1247 *
1248 */
1249
1250void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1251{
1252 int x, length = rgd->rd_length;
1253
1254 for (x = 0; x < length; x++) {
1255 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1256 if (bi->bi_bh) {
1257 brelse(bi->bi_bh);
1258 bi->bi_bh = NULL;
1259 }
1260 }
1261
1262}
1263
1264/**
1265 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1266 * @gh: The glock holder for the resource group
1267 *
1268 */
1269
1270void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1271{
1272 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1273 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1274 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1275
1276 if (rgd && demote_requested)
1277 gfs2_rgrp_brelse(rgd);
1278}
1279
1280int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1281 struct buffer_head *bh,
1282 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1283{
1284 struct super_block *sb = sdp->sd_vfs;
1285 u64 blk;
1286 sector_t start = 0;
1287 sector_t nr_blks = 0;
1288 int rv;
1289 unsigned int x;
1290 u32 trimmed = 0;
1291 u8 diff;
1292
1293 for (x = 0; x < bi->bi_len; x++) {
1294 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1295 clone += bi->bi_offset;
1296 clone += x;
1297 if (bh) {
1298 const u8 *orig = bh->b_data + bi->bi_offset + x;
1299 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1300 } else {
1301 diff = ~(*clone | (*clone >> 1));
1302 }
1303 diff &= 0x55;
1304 if (diff == 0)
1305 continue;
1306 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1307 while(diff) {
1308 if (diff & 1) {
1309 if (nr_blks == 0)
1310 goto start_new_extent;
1311 if ((start + nr_blks) != blk) {
1312 if (nr_blks >= minlen) {
1313 rv = sb_issue_discard(sb,
1314 start, nr_blks,
1315 GFP_NOFS, 0);
1316 if (rv)
1317 goto fail;
1318 trimmed += nr_blks;
1319 }
1320 nr_blks = 0;
1321start_new_extent:
1322 start = blk;
1323 }
1324 nr_blks++;
1325 }
1326 diff >>= 2;
1327 blk++;
1328 }
1329 }
1330 if (nr_blks >= minlen) {
1331 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1332 if (rv)
1333 goto fail;
1334 trimmed += nr_blks;
1335 }
1336 if (ptrimmed)
1337 *ptrimmed = trimmed;
1338 return 0;
1339
1340fail:
1341 if (sdp->sd_args.ar_discard)
1342 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
1343 sdp->sd_args.ar_discard = 0;
1344 return -EIO;
1345}
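
/*
 * The "diff" computation above isolates newly freed blocks: for each two-bit
 * entry, (x | x >> 1) has its even bit set iff the entry is non-zero, so
 * ~(*orig | *orig >> 1) & (*clone | *clone >> 1) & 0x55 flags blocks that are
 * free in the bitmap passed in @bh but still in use in the clone, i.e. blocks
 * freed since the clone was taken.  For example, orig = 0x10 and clone = 0x14
 * gives diff = 0x04: only block 1 of that byte (newly freed) is considered
 * for discard.  When no @bh is passed (the fstrim case) every block that is
 * currently free qualifies.
 */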
1346
1347/**
1348 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1349 * @filp: Any file on the filesystem
1350 * @argp: Pointer to the arguments (also used to pass result)
1351 *
1352 * Returns: 0 on success, otherwise error code
1353 */
1354
1355int gfs2_fitrim(struct file *filp, void __user *argp)
1356{
1357 struct inode *inode = file_inode(filp);
1358 struct gfs2_sbd *sdp = GFS2_SB(inode);
1359 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1360 struct buffer_head *bh;
1361 struct gfs2_rgrpd *rgd;
1362 struct gfs2_rgrpd *rgd_end;
1363 struct gfs2_holder gh;
1364 struct fstrim_range r;
1365 int ret = 0;
1366 u64 amt;
1367 u64 trimmed = 0;
1368 u64 start, end, minlen;
1369 unsigned int x;
1370 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1371
1372 if (!capable(CAP_SYS_ADMIN))
1373 return -EPERM;
1374
1375 if (!blk_queue_discard(q))
1376 return -EOPNOTSUPP;
1377
1378 if (copy_from_user(&r, argp, sizeof(r)))
1379 return -EFAULT;
1380
1381 ret = gfs2_rindex_update(sdp);
1382 if (ret)
1383 return ret;
1384
1385 start = r.start >> bs_shift;
1386 end = start + (r.len >> bs_shift);
1387 minlen = max_t(u64, r.minlen,
1388 q->limits.discard_granularity) >> bs_shift;
1389
1390 if (end <= start || minlen > sdp->sd_max_rg_data)
1391 return -EINVAL;
1392
1393 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1394 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1395
1396 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1397 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1398 return -EINVAL; /* start is beyond the end of the fs */
1399
1400 while (1) {
1401
1402 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1403 if (ret)
1404 goto out;
1405
1406 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1407 /* Trim each bitmap in the rgrp */
1408 for (x = 0; x < rgd->rd_length; x++) {
1409 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1410 ret = gfs2_rgrp_send_discards(sdp,
1411 rgd->rd_data0, NULL, bi, minlen,
1412 &amt);
1413 if (ret) {
1414 gfs2_glock_dq_uninit(&gh);
1415 goto out;
1416 }
1417 trimmed += amt;
1418 }
1419
1420 /* Mark rgrp as having been trimmed */
1421 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1422 if (ret == 0) {
1423 bh = rgd->rd_bits[0].bi_bh;
1424 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1425 gfs2_trans_add_meta(rgd->rd_gl, bh);
1426 gfs2_rgrp_out(rgd, bh->b_data);
1427 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1428 gfs2_trans_end(sdp);
1429 }
1430 }
1431 gfs2_glock_dq_uninit(&gh);
1432
1433 if (rgd == rgd_end)
1434 break;
1435
1436 rgd = gfs2_rgrpd_get_next(rgd);
1437 }
1438
1439out:
1440 r.len = trimmed << bs_shift;
1441 if (copy_to_user(argp, &r, sizeof(r)))
1442 return -EFAULT;
1443
1444 return ret;
1445}
1446
1447/**
1448 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1449 * @ip: the inode structure
1450 *
1451 */
1452static void rs_insert(struct gfs2_inode *ip)
1453{
1454 struct rb_node **newn, *parent = NULL;
1455 int rc;
1456 struct gfs2_blkreserv *rs = &ip->i_res;
1457 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1458 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1459
1460 BUG_ON(gfs2_rs_active(rs));
1461
1462 spin_lock(&rgd->rd_rsspin);
1463 newn = &rgd->rd_rstree.rb_node;
1464 while (*newn) {
1465 struct gfs2_blkreserv *cur =
1466 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1467
1468 parent = *newn;
1469 rc = rs_cmp(fsblock, rs->rs_free, cur);
1470 if (rc > 0)
1471 newn = &((*newn)->rb_right);
1472 else if (rc < 0)
1473 newn = &((*newn)->rb_left);
1474 else {
1475 spin_unlock(&rgd->rd_rsspin);
1476 WARN_ON(1);
1477 return;
1478 }
1479 }
1480
1481 rb_link_node(&rs->rs_node, parent, newn);
1482 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1483
1484 /* Do our rgrp accounting for the reservation */
1485 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1486 spin_unlock(&rgd->rd_rsspin);
1487 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1488}
1489
1490/**
1491 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1492 * @rgd: the resource group descriptor
1493 * @ip: pointer to the inode for which we're reserving blocks
1494 * @ap: the allocation parameters
1495 *
1496 */
1497
1498static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1499 const struct gfs2_alloc_parms *ap)
1500{
1501 struct gfs2_rbm rbm = { .rgd = rgd, };
1502 u64 goal;
1503 struct gfs2_blkreserv *rs = &ip->i_res;
1504 u32 extlen;
1505 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1506 int ret;
1507 struct inode *inode = &ip->i_inode;
1508
1509 if (S_ISDIR(inode->i_mode))
1510 extlen = 1;
1511 else {
1512 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
1513 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1514 }
1515 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1516 return;
1517
1518 /* Find bitmap block that contains bits for goal block */
1519 if (rgrp_contains_block(rgd, ip->i_goal))
1520 goal = ip->i_goal;
1521 else
1522 goal = rgd->rd_last_alloc + rgd->rd_data0;
1523
1524 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1525 return;
1526
1527 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
1528 if (ret == 0) {
1529 rs->rs_rbm = rbm;
1530 rs->rs_free = extlen;
1531 rs->rs_inum = ip->i_no_addr;
1532 rs_insert(ip);
1533 } else {
1534 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1535 rgd->rd_last_alloc = 0;
1536 }
1537}
1538
1539/**
1540 * gfs2_next_unreserved_block - Return next block that is not reserved
1541 * @rgd: The resource group
1542 * @block: The starting block
1543 * @length: The required length
1544 * @ip: Ignore any reservations for this inode
1545 *
1546 * If the block does not appear in any reservation, then return the
1547 * block number unchanged. If it does appear in the reservation, then
1548 * keep looking through the tree of reservations in order to find the
1549 * first block number which is not reserved.
1550 */
1551
1552static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1553 u32 length,
1554 const struct gfs2_inode *ip)
1555{
1556 struct gfs2_blkreserv *rs;
1557 struct rb_node *n;
1558 int rc;
1559
1560 spin_lock(&rgd->rd_rsspin);
1561 n = rgd->rd_rstree.rb_node;
1562 while (n) {
1563 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1564 rc = rs_cmp(block, length, rs);
1565 if (rc < 0)
1566 n = n->rb_left;
1567 else if (rc > 0)
1568 n = n->rb_right;
1569 else
1570 break;
1571 }
1572
1573 if (n) {
1574 while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
1575 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1576 n = n->rb_right;
1577 if (n == NULL)
1578 break;
1579 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1580 }
1581 }
1582
1583 spin_unlock(&rgd->rd_rsspin);
1584 return block;
1585}
1586
1587/**
1588 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1589 * @rbm: The current position in the resource group
1590 * @ip: The inode for which we are searching for blocks
1591 * @minext: The minimum extent length
1592 * @maxext: A pointer to the maximum extent structure
1593 *
1594 * This checks the current position in the rgrp to see whether there is
1595 * a reservation covering this block. If not then this function is a
1596 * no-op. If there is, then the position is moved to the end of the
1597 * contiguous reservation(s) so that we are pointing at the first
1598 * non-reserved block.
1599 *
1600 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1601 */
1602
1603static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1604 const struct gfs2_inode *ip,
1605 u32 minext,
1606 struct gfs2_extent *maxext)
1607{
1608 u64 block = gfs2_rbm_to_block(rbm);
1609 u32 extlen = 1;
1610 u64 nblock;
1611 int ret;
1612
1613 /*
1614 * If we have a minimum extent length, then skip over any extent
1615 * which is less than the min extent length in size.
1616 */
1617 if (minext) {
1618 extlen = gfs2_free_extlen(rbm, minext);
1619 if (extlen <= maxext->len)
1620 goto fail;
1621 }
1622
1623 /*
1624 * Check the extent which has been found against the reservations
1625 * and skip if parts of it are already reserved
1626 */
1627 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1628 if (nblock == block) {
1629 if (!minext || extlen >= minext)
1630 return 0;
1631
1632 if (extlen > maxext->len) {
1633 maxext->len = extlen;
1634 maxext->rbm = *rbm;
1635 }
1636fail:
1637 nblock = block + extlen;
1638 }
1639 ret = gfs2_rbm_from_block(rbm, nblock);
1640 if (ret < 0)
1641 return ret;
1642 return 1;
1643}
1644
1645/**
1646 * gfs2_rbm_find - Look for blocks of a particular state
1647 * @rbm: Value/result starting position and final position
1648 * @state: The state which we want to find
1649 * @minext: Pointer to the requested extent length (NULL for a single block)
1650 * This is updated to be the actual reservation size.
1651 * @ip: If set, check for reservations
1652 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1653 * around until we've reached the starting point.
1654 *
1655 * Side effects:
1656 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1657 * has no free blocks in it.
1658 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1659 * has come up short on a free block search.
1660 *
1661 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1662 */
1663
1664static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1665 const struct gfs2_inode *ip, bool nowrap)
1666{
1667 struct buffer_head *bh;
1668 int initial_bii;
1669 u32 initial_offset;
1670 int first_bii = rbm->bii;
1671 u32 first_offset = rbm->offset;
1672 u32 offset;
1673 u8 *buffer;
1674 int n = 0;
1675 int iters = rbm->rgd->rd_length;
1676 int ret;
1677 struct gfs2_bitmap *bi;
1678 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1679
1680 /* If we are not starting at the beginning of a bitmap, then we
1681 * need to add one to the bitmap count to ensure that we search
1682 * the starting bitmap twice.
1683 */
1684 if (rbm->offset != 0)
1685 iters++;
1686
1687 while(1) {
1688 bi = rbm_bi(rbm);
1689 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1690 (state == GFS2_BLKST_FREE))
1691 goto next_bitmap;
1692
1693 bh = bi->bi_bh;
1694 buffer = bh->b_data + bi->bi_offset;
1695 WARN_ON(!buffer_uptodate(bh));
1696 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1697 buffer = bi->bi_clone + bi->bi_offset;
1698 initial_offset = rbm->offset;
1699 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1700 if (offset == BFITNOENT)
1701 goto bitmap_full;
1702 rbm->offset = offset;
1703 if (ip == NULL)
1704 return 0;
1705
1706 initial_bii = rbm->bii;
1707 ret = gfs2_reservation_check_and_update(rbm, ip,
1708 minext ? *minext : 0,
1709 &maxext);
1710 if (ret == 0)
1711 return 0;
1712 if (ret > 0) {
1713 n += (rbm->bii - initial_bii);
1714 goto next_iter;
1715 }
1716 if (ret == -E2BIG) {
1717 rbm->bii = 0;
1718 rbm->offset = 0;
1719 n += (rbm->bii - initial_bii);
1720 goto res_covered_end_of_rgrp;
1721 }
1722 return ret;
1723
1724bitmap_full: /* Mark bitmap as full and fall through */
1725 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1726 set_bit(GBF_FULL, &bi->bi_flags);
1727
1728next_bitmap: /* Find next bitmap in the rgrp */
1729 rbm->offset = 0;
1730 rbm->bii++;
1731 if (rbm->bii == rbm->rgd->rd_length)
1732 rbm->bii = 0;
1733res_covered_end_of_rgrp:
1734 if ((rbm->bii == 0) && nowrap)
1735 break;
1736 n++;
1737next_iter:
1738 if (n >= iters)
1739 break;
1740 }
1741
1742 if (minext == NULL || state != GFS2_BLKST_FREE)
1743 return -ENOSPC;
1744
1745 /* If the extent was too small, and it's smaller than the smallest
1746 to have failed before, remember for future reference that it's
1747 useless to search this rgrp again for this amount or more. */
1748 if ((first_offset == 0) && (first_bii == 0) &&
1749 (*minext < rbm->rgd->rd_extfail_pt))
1750 rbm->rgd->rd_extfail_pt = *minext;
1751
1752 /* If the maximum extent we found is big enough to fulfill the
1753 minimum requirements, use it anyway. */
1754 if (maxext.len) {
1755 *rbm = maxext.rbm;
1756 *minext = maxext.len;
1757 return 0;
1758 }
1759
1760 return -ENOSPC;
1761}
1762
1763/**
1764 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1765 * @rgd: The rgrp
1766 * @last_unlinked: block address of the last dinode we unlinked
1767 * @skip: block address we should explicitly not unlink
1768 *
 * The function returns nothing; any unlinked dinodes that are found have
 * delete work queued on their iopen glocks.
1771 */
1772
1773static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1774{
1775 u64 block;
1776 struct gfs2_sbd *sdp = rgd->rd_sbd;
1777 struct gfs2_glock *gl;
1778 struct gfs2_inode *ip;
1779 int error;
1780 int found = 0;
1781 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1782
1783 while (1) {
1784 down_write(&sdp->sd_log_flush_lock);
1785 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1786 true);
1787 up_write(&sdp->sd_log_flush_lock);
1788 if (error == -ENOSPC)
1789 break;
1790 if (WARN_ON_ONCE(error))
1791 break;
1792
1793 block = gfs2_rbm_to_block(&rbm);
1794 if (gfs2_rbm_from_block(&rbm, block + 1))
1795 break;
1796 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1797 continue;
1798 if (block == skip)
1799 continue;
1800 *last_unlinked = block;
1801
1802 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1803 if (error)
1804 continue;
1805
1806 /* If the inode is already in cache, we can ignore it here
1807 * because the existing inode disposal code will deal with
1808 * it when all refs have gone away. Accessing gl_object like
1809 * this is not safe in general. Here it is ok because we do
1810 * not dereference the pointer, and we only need an approx
1811 * answer to whether it is NULL or not.
1812 */
1813 ip = gl->gl_object;
1814
1815 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1816 gfs2_glock_put(gl);
1817 else
1818 found++;
1819
1820 /* Limit reclaim to sensible number of tasks */
1821 if (found > NR_CPUS)
1822 return;
1823 }
1824
1825 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1826 return;
1827}
1828
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple: we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */
1855
1856static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1857{
1858 const struct gfs2_glock *gl = rgd->rd_gl;
1859 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1860 struct gfs2_lkstats *st;
1861 u64 r_dcount, l_dcount;
1862 u64 l_srttb, a_srttb = 0;
1863 s64 srttb_diff;
1864 u64 sqr_diff;
1865 u64 var;
1866 int cpu, nonzero = 0;
1867
1868 preempt_disable();
1869 for_each_present_cpu(cpu) {
1870 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1871 if (st->stats[GFS2_LKS_SRTTB]) {
1872 a_srttb += st->stats[GFS2_LKS_SRTTB];
1873 nonzero++;
1874 }
1875 }
1876 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1877 if (nonzero)
1878 do_div(a_srttb, nonzero);
1879 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1880 var = st->stats[GFS2_LKS_SRTTVARB] +
1881 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1882 preempt_enable();
1883
1884 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1885 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1886
1887 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1888 return false;
1889
1890 srttb_diff = a_srttb - l_srttb;
1891 sqr_diff = srttb_diff * srttb_diff;
1892
1893 var *= 2;
1894 if (l_dcount < 8 || r_dcount < 8)
1895 var *= 2;
1896 if (loops == 1)
1897 var *= 2;
1898
1899 return ((srttb_diff < 0) && (sqr_diff > var));
1900}
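
/*
 * Putting the above together, the rgrp is reported congested iff
 *
 *   l_srttb > a_srttb  and  (a_srttb - l_srttb)^2 > k * (var_cpu + var_gl)
 *
 * where a_srttb is the average of the non-zero per-cpu rgrp SRTTB stats,
 * l_srttb and var_gl come from this glock's own stats, var_cpu from the
 * current cpu, and k starts at 2, doubling once if l_dcount or r_dcount is
 * below 8 and once more on the second pass (loops == 1).
 */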
1901
1902/**
1903 * gfs2_rgrp_used_recently
1904 * @rs: The block reservation with the rgrp to test
1905 * @msecs: The time limit in milliseconds
1906 *
1907 * Returns: True if the rgrp glock has been used within the time limit
1908 */
1909static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1910 u64 msecs)
1911{
1912 u64 tdiff;
1913
1914 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1915 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1916
1917 return tdiff > (msecs * 1000 * 1000);
1918}
1919
1920static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1921{
1922 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1923 u32 skip;
1924
1925 get_random_bytes(&skip, sizeof(skip));
1926 return skip % sdp->sd_rgrps;
1927}
1928
1929static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1930{
1931 struct gfs2_rgrpd *rgd = *pos;
1932 struct gfs2_sbd *sdp = rgd->rd_sbd;
1933
1934 rgd = gfs2_rgrpd_get_next(rgd);
1935 if (rgd == NULL)
1936 rgd = gfs2_rgrpd_get_first(sdp);
1937 *pos = rgd;
1938 if (rgd != begin) /* If we didn't wrap */
1939 return true;
1940 return false;
1941}
1942
1943/**
1944 * fast_to_acquire - determine if a resource group will be fast to acquire
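 * @rgd: The rgrp to check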
1945 *
1946 * If this is one of our preferred rgrps, it should be quicker to acquire,
1947 * because we tried to set ourselves up as dlm lock master.
1948 */
1949static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1950{
1951 struct gfs2_glock *gl = rgd->rd_gl;
1952
1953 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1954 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1955 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1956 return 1;
1957 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1958 return 1;
1959 return 0;
1960}
1961
1962/**
1963 * gfs2_inplace_reserve - Reserve space in the filesystem
1964 * @ip: the inode to reserve space for
1965 * @ap: the allocation parameters
1966 *
1967 * We try our best to find an rgrp that has at least ap->target blocks
1968 * available. After a couple of passes (loops == 2), the prospects of finding
1969 * such an rgrp diminish. At this stage, we return the first rgrp that has
1970 * at least ap->min_target blocks available. Either way, we set ap->allowed to
1971 * the number of blocks available in the chosen rgrp.
1972 *
1973 * Returns: 0 on success,
1974 * -ENOMEM if a suitable rgrp can't be found
1975 * errno otherwise
1976 */
1977
1978int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
1979{
1980 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1981 struct gfs2_rgrpd *begin = NULL;
1982 struct gfs2_blkreserv *rs = &ip->i_res;
1983 int error = 0, rg_locked, flags = 0;
1984 u64 last_unlinked = NO_BLOCK;
1985 int loops = 0;
1986 u32 skip = 0;
1987
1988 if (sdp->sd_args.ar_rgrplvb)
1989 flags |= GL_SKIP;
1990 if (gfs2_assert_warn(sdp, ap->target))
1991 return -EINVAL;
1992 if (gfs2_rs_active(rs)) {
1993 begin = rs->rs_rbm.rgd;
1994 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
1995 rs->rs_rbm.rgd = begin = ip->i_rgd;
1996 } else {
1997 check_and_update_goal(ip);
1998 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
1999 }
2000 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
2001 skip = gfs2_orlov_skip(ip);
2002 if (rs->rs_rbm.rgd == NULL)
2003 return -EBADSLT;
2004
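	/*
	 * Each full pass over the rgrps relaxes the criteria: loop 0 insists
	 * on quickly acquired, uncontended rgrps where a reservation succeeds;
	 * loop 1 relaxes those checks; loop 2 flushes the log and will settle
	 * for ap->min_target blocks.
	 */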
2005 while (loops < 3) {
2006 rg_locked = 1;
2007
2008 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
2009 rg_locked = 0;
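			/* For Orlov allocations, pass over the first 'skip' rgrps */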
2010 if (skip && skip--)
2011 goto next_rgrp;
2012 if (!gfs2_rs_active(rs)) {
2013 if (loops == 0 &&
2014 !fast_to_acquire(rs->rs_rbm.rgd))
2015 goto next_rgrp;
2016 if ((loops < 2) &&
2017 gfs2_rgrp_used_recently(rs, 1000) &&
2018 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2019 goto next_rgrp;
2020 }
2021 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
2022 LM_ST_EXCLUSIVE, flags,
2023 &rs->rs_rgd_gh);
2024 if (unlikely(error))
2025 return error;
2026 if (!gfs2_rs_active(rs) && (loops < 2) &&
2027 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2028 goto skip_rgrp;
2029 if (sdp->sd_args.ar_rgrplvb) {
2030 error = update_rgrp_lvb(rs->rs_rbm.rgd);
2031 if (unlikely(error)) {
2032 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2033 return error;
2034 }
2035 }
2036 }
2037
2038		/* Skip unusable resource groups */
2039 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2040 GFS2_RDF_ERROR)) ||
2041 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2042 goto skip_rgrp;
2043
2044 if (sdp->sd_args.ar_rgrplvb)
2045 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2046
2047 /* Get a reservation if we don't already have one */
2048 if (!gfs2_rs_active(rs))
2049 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2050
2051 /* Skip rgrps when we can't get a reservation on first pass */
2052 if (!gfs2_rs_active(rs) && (loops < 1))
2053 goto check_rgrp;
2054
2055 /* If rgrp has enough free space, use it */
2056 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2057 (loops == 2 && ap->min_target &&
2058 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
2059 ip->i_rgd = rs->rs_rbm.rgd;
2060 ap->allowed = ip->i_rgd->rd_free_clone;
2061 return 0;
2062 }
2063check_rgrp:
2064 /* Check for unlinked inodes which can be reclaimed */
2065 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2066 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2067 ip->i_no_addr);
2068skip_rgrp:
2069 /* Drop reservation, if we couldn't use reserved rgrp */
2070 if (gfs2_rs_active(rs))
2071 gfs2_rs_deltree(rs);
2072
2073 /* Unlock rgrp if required */
2074 if (!rg_locked)
2075 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2076next_rgrp:
2077 /* Find the next rgrp, and continue looking */
2078 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2079 continue;
2080 if (skip)
2081 continue;
2082
2083 /* If we've scanned all the rgrps, but found no free blocks
2084 * then this checks for some less likely conditions before
2085 * trying again.
2086 */
2087 loops++;
2088 /* Check that fs hasn't grown if writing to rindex */
2089 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2090 error = gfs2_ri_update(ip);
2091 if (error)
2092 return error;
2093 }
2094 /* Flushing the log may release space */
2095 if (loops == 2)
2096 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
2097 GFS2_LFC_INPLACE_RESERVE);
2098 }
2099
2100 return -ENOSPC;
2101}
2102
2103/**
2104 * gfs2_inplace_release - release an inplace reservation
2105 * @ip: the inode the reservation was taken out on
2106 *
2107 * Release a reservation made by gfs2_inplace_reserve().
2108 */
2109
2110void gfs2_inplace_release(struct gfs2_inode *ip)
2111{
2112 struct gfs2_blkreserv *rs = &ip->i_res;
2113
2114 if (gfs2_holder_initialized(&rs->rs_rgd_gh))
2115 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2116}
2117
2118/**
2119 * gfs2_get_block_type - Check a block in a RG is of given type
2120 * @rgd: the resource group holding the block
2121 * @block: the block number
2122 *
2123 * Returns: The block type (GFS2_BLKST_*)
2124 */
2125
2126static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
2127{
2128 struct gfs2_rbm rbm = { .rgd = rgd, };
2129 int ret;
2130
2131 ret = gfs2_rbm_from_block(&rbm, block);
2132 WARN_ON_ONCE(ret != 0);
2133
2134 return gfs2_testbit(&rbm);
2135}
2136
2137
2138/**
2139 * gfs2_alloc_extent - allocate an extent from a given bitmap
2140 * @rbm: the resource group information
2141 * @dinode: TRUE if the first block we allocate is for a dinode
2142 * @n: The extent length (value/result)
2143 *
2144 * Add the bitmap buffer to the transaction.
2145 * Set the found bits to the new state to change the blocks' allocation state.
2146 */
2147static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2148 unsigned int *n)
2149{
2150 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2151 const unsigned int elen = *n;
2152 u64 block;
2153 int ret;
2154
2155 *n = 1;
2156 block = gfs2_rbm_to_block(rbm);
2157 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2158 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2159 block++;
2160 while (*n < elen) {
2161 ret = gfs2_rbm_from_block(&pos, block);
2162 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
2163 break;
2164 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2165 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2166 (*n)++;
2167 block++;
2168 }
2169}
2170
2171/**
2172 * rgblk_free - Change alloc state of given block(s)
2173 * @sdp: the filesystem
2174 * @bstart: the start of a run of blocks to free
2175 * @blen: the length of the block run (all must lie within ONE RG!)
2176 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2177 *
2178 * Returns: Resource group containing the block(s)
2179 */
2180
2181static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2182 u32 blen, unsigned char new_state)
2183{
2184 struct gfs2_rbm rbm;
2185 struct gfs2_bitmap *bi, *bi_prev = NULL;
2186
2187 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2188 if (!rbm.rgd) {
2189 if (gfs2_consist(sdp))
2190 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2191 return NULL;
2192 }
2193
2194 gfs2_rbm_from_block(&rbm, bstart);
2195 while (blen--) {
2196 bi = rbm_bi(&rbm);
2197 if (bi != bi_prev) {
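			/*
			 * Set up the clone bitmap lazily: freed blocks keep
			 * their old state in the clone so they are not
			 * reallocated before the log has been flushed.
			 */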
2198 if (!bi->bi_clone) {
2199 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2200 GFP_NOFS | __GFP_NOFAIL);
2201 memcpy(bi->bi_clone + bi->bi_offset,
2202 bi->bi_bh->b_data + bi->bi_offset,
2203 bi->bi_len);
2204 }
2205 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2206 bi_prev = bi;
2207 }
2208 gfs2_setbit(&rbm, false, new_state);
2209 gfs2_rbm_incr(&rbm);
2210 }
2211
2212 return rbm.rgd;
2213}
2214
2215/**
2216 * gfs2_rgrp_dump - print out an rgrp
2217 * @seq: The iterator
2218 * @gl: The glock in question
2219 *
2220 */
2221
2222void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2223{
2224 struct gfs2_rgrpd *rgd = gl->gl_object;
2225 struct gfs2_blkreserv *trs;
2226 const struct rb_node *n;
2227
2228 if (rgd == NULL)
2229 return;
2230 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2231 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2232 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2233 rgd->rd_reserved, rgd->rd_extfail_pt);
2234 spin_lock(&rgd->rd_rsspin);
2235 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2236 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2237 dump_rs(seq, trs);
2238 }
2239 spin_unlock(&rgd->rd_rsspin);
2240}
2241
2242static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2243{
2244 struct gfs2_sbd *sdp = rgd->rd_sbd;
2245 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2246 (unsigned long long)rgd->rd_addr);
2247 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2248 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2249 rgd->rd_flags |= GFS2_RDF_ERROR;
2250}
2251
2252/**
2253 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2254 * @ip: The inode we have just allocated blocks for
2255 * @rbm: The start of the allocated blocks
2256 * @len: The extent length
2257 *
2258 * Adjusts a reservation after an allocation has taken place. If the
2259 * reservation does not match the allocation, or if it is now empty
2260 * then it is removed.
2261 */
2262
2263static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2264 const struct gfs2_rbm *rbm, unsigned len)
2265{
2266 struct gfs2_blkreserv *rs = &ip->i_res;
2267 struct gfs2_rgrpd *rgd = rbm->rgd;
2268 unsigned rlen;
2269 u64 block;
2270 int ret;
2271
2272 spin_lock(&rgd->rd_rsspin);
2273 if (gfs2_rs_active(rs)) {
2274 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2275 block = gfs2_rbm_to_block(rbm);
2276 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2277 rlen = min(rs->rs_free, len);
2278 rs->rs_free -= rlen;
2279 rgd->rd_reserved -= rlen;
2280 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2281 if (rs->rs_free && !ret)
2282 goto out;
2283 /* We used up our block reservation, so we should
2284 reserve more blocks next time. */
2285 atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
2286 }
2287 __rs_deltree(rs);
2288 }
2289out:
2290 spin_unlock(&rgd->rd_rsspin);
2291}
2292
2293/**
2294 * gfs2_set_alloc_start - Set starting point for block allocation
2295 * @rbm: The rbm which will be set to the required location
2296 * @ip: The gfs2 inode
2297 * @dinode: Flag to say if allocation includes a new inode
2298 *
2299 * This sets the starting point from the reservation if one is active
2300 * otherwise it falls back to guessing a start point based on the
2301 * inode's goal block or the last allocation point in the rgrp.
2302 */
2303
2304static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2305 const struct gfs2_inode *ip, bool dinode)
2306{
2307 u64 goal;
2308
2309 if (gfs2_rs_active(&ip->i_res)) {
2310 *rbm = ip->i_res.rs_rbm;
2311 return;
2312 }
2313
2314 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2315 goal = ip->i_goal;
2316 else
2317 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2318
2319 gfs2_rbm_from_block(rbm, goal);
2320}
2321
2322/**
2323 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2324 * @ip: the inode to allocate the block for
2325 * @bn: Used to return the starting block number
2326 * @nblocks: requested number of blocks/extent length (value/result)
2327 * @dinode: 1 if we're allocating a dinode block, else 0
2328 * @generation: the generation number of the inode
2329 *
2330 * Returns: 0 or error
2331 */
2332
2333int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2334 bool dinode, u64 *generation)
2335{
2336 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2337 struct buffer_head *dibh;
2338 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
2339 unsigned int ndata;
2340 u64 block; /* block, within the file system scope */
2341 int error;
2342
2343 gfs2_set_alloc_start(&rbm, ip, dinode);
2344 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
2345
2346 if (error == -ENOSPC) {
2347 gfs2_set_alloc_start(&rbm, ip, dinode);
2348 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
2349 }
2350
2351 /* Since all blocks are reserved in advance, this shouldn't happen */
2352 if (error) {
2353 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2354 (unsigned long long)ip->i_no_addr, error, *nblocks,
2355 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2356 rbm.rgd->rd_extfail_pt);
2357 goto rgrp_error;
2358 }
2359
2360 gfs2_alloc_extent(&rbm, dinode, nblocks);
2361 block = gfs2_rbm_to_block(&rbm);
2362 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2363 if (gfs2_rs_active(&ip->i_res))
2364 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2365 ndata = *nblocks;
2366 if (dinode)
2367 ndata--;
2368
2369 if (!dinode) {
2370 ip->i_goal = block + ndata - 1;
2371 error = gfs2_meta_inode_buffer(ip, &dibh);
2372 if (error == 0) {
2373 struct gfs2_dinode *di =
2374 (struct gfs2_dinode *)dibh->b_data;
2375 gfs2_trans_add_meta(ip->i_gl, dibh);
2376 di->di_goal_meta = di->di_goal_data =
2377 cpu_to_be64(ip->i_goal);
2378 brelse(dibh);
2379 }
2380 }
2381 if (rbm.rgd->rd_free < *nblocks) {
2382 pr_warn("nblocks=%u\n", *nblocks);
2383 goto rgrp_error;
2384 }
2385
2386 rbm.rgd->rd_free -= *nblocks;
2387 if (dinode) {
2388 rbm.rgd->rd_dinodes++;
2389 *generation = rbm.rgd->rd_igeneration++;
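		/* Never hand out a generation number of zero */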
2390 if (*generation == 0)
2391 *generation = rbm.rgd->rd_igeneration++;
2392 }
2393
2394 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2395 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2396 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2397
2398 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2399 if (dinode)
2400 gfs2_trans_add_unrevoke(sdp, block, *nblocks);
2401
2402 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2403
2404 rbm.rgd->rd_free_clone -= *nblocks;
2405 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2406 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2407 *bn = block;
2408 return 0;
2409
2410rgrp_error:
2411 gfs2_rgrp_error(rbm.rgd);
2412 return -EIO;
2413}
2414
2415/**
2416 * __gfs2_free_blocks - free a contiguous run of block(s)
2417 * @ip: the inode these blocks are being freed from
2418 * @bstart: first block of a run of contiguous blocks
2419 * @blen: the length of the block run
2420 * @meta: 1 if the blocks represent metadata
2421 *
2422 */
2423
2424void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2425{
2426 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2427 struct gfs2_rgrpd *rgd;
2428
2429 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2430 if (!rgd)
2431 return;
2432 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2433 rgd->rd_free += blen;
2434 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2435 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2436 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2437 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2438
2439 /* Directories keep their data in the metadata address space */
2440 if (meta || ip->i_depth)
2441 gfs2_meta_wipe(ip, bstart, blen);
2442}
2443
2444/**
2445 * gfs2_free_meta - free a contiguous run of metadata block(s)
2446 * @ip: the inode these blocks are being freed from
2447 * @bstart: first block of a run of contiguous blocks
2448 * @blen: the length of the block run
2449 *
2450 */
2451
2452void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2453{
2454 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2455
2456 __gfs2_free_blocks(ip, bstart, blen, 1);
2457 gfs2_statfs_change(sdp, 0, +blen, 0);
2458 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2459}
2460
2461void gfs2_unlink_di(struct inode *inode)
2462{
2463 struct gfs2_inode *ip = GFS2_I(inode);
2464 struct gfs2_sbd *sdp = GFS2_SB(inode);
2465 struct gfs2_rgrpd *rgd;
2466 u64 blkno = ip->i_no_addr;
2467
2468 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2469 if (!rgd)
2470 return;
2471 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2472 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2473 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2474 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2475 update_rgrp_lvb_unlinked(rgd, 1);
2476}
2477
2478void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2479{
2480 struct gfs2_sbd *sdp = rgd->rd_sbd;
2481 struct gfs2_rgrpd *tmp_rgd;
2482
2483 tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2484 if (!tmp_rgd)
2485 return;
2486 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2487
2488 if (!rgd->rd_dinodes)
2489 gfs2_consist_rgrpd(rgd);
2490 rgd->rd_dinodes--;
2491 rgd->rd_free++;
2492
2493 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2494 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2495 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2496 update_rgrp_lvb_unlinked(rgd, -1);
2497
2498 gfs2_statfs_change(sdp, 0, +1, -1);
2499 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2500 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2501 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
2502}
2503
2504/**
2505 * gfs2_check_blk_type - Check the type of a block
2506 * @sdp: The superblock
2507 * @no_addr: The block number to check
2508 * @type: The block type we are looking for
2509 *
2510 * Returns: 0 if the block type matches the expected type
2511 * -ESTALE if it doesn't match
2512 * or -ve errno if something went wrong while checking
2513 */
2514
2515int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2516{
2517 struct gfs2_rgrpd *rgd;
2518 struct gfs2_holder rgd_gh;
2519 int error = -EINVAL;
2520
2521 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2522 if (!rgd)
2523 goto fail;
2524
2525 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2526 if (error)
2527 goto fail;
2528
2529 if (gfs2_get_block_type(rgd, no_addr) != type)
2530 error = -ESTALE;
2531
2532 gfs2_glock_dq_uninit(&rgd_gh);
2533fail:
2534 return error;
2535}
2536
2537/**
2538 * gfs2_rlist_add - add a RG to a list of RGs
2539 * @ip: the inode
2540 * @rlist: the list of resource groups
2541 * @block: the block
2542 *
2543 * Figure out what RG a block belongs to and add that RG to the list
2544 *
2545 * FIXME: Don't use NOFAIL
2546 *
2547 */
2548
2549void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2550 u64 block)
2551{
2552 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2553 struct gfs2_rgrpd *rgd;
2554 struct gfs2_rgrpd **tmp;
2555 unsigned int new_space;
2556 unsigned int x;
2557
2558 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2559 return;
2560
2561 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2562 rgd = ip->i_rgd;
2563 else
2564 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2565 if (!rgd) {
2566 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
2567 return;
2568 }
2569 ip->i_rgd = rgd;
2570
2571 for (x = 0; x < rlist->rl_rgrps; x++)
2572 if (rlist->rl_rgd[x] == rgd)
2573 return;
2574
2575 if (rlist->rl_rgrps == rlist->rl_space) {
2576 new_space = rlist->rl_space + 10;
2577
2578 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2579 GFP_NOFS | __GFP_NOFAIL);
2580
2581 if (rlist->rl_rgd) {
2582 memcpy(tmp, rlist->rl_rgd,
2583 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2584 kfree(rlist->rl_rgd);
2585 }
2586
2587 rlist->rl_space = new_space;
2588 rlist->rl_rgd = tmp;
2589 }
2590
2591 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2592}
2593
2594/**
2595 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2596 * and initialize an array of glock holders for them
2597 * @rlist: the list of resource groups
2598 * @state: the lock state to acquire the RG lock in
2599 *
2600 * FIXME: Don't use NOFAIL
2601 *
2602 */
2603
2604void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2605{
2606 unsigned int x;
2607
2608 rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
2609 GFP_NOFS | __GFP_NOFAIL);
2610 for (x = 0; x < rlist->rl_rgrps; x++)
2611 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
2612 state, 0,
2613 &rlist->rl_ghs[x]);
2614}
2615
2616/**
2617 * gfs2_rlist_free - free a resource group list
2618 * @rlist: the list of resource groups
2619 *
2620 */
2621
2622void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2623{
2624 unsigned int x;
2625
2626 kfree(rlist->rl_rgd);
2627
2628 if (rlist->rl_ghs) {
2629 for (x = 0; x < rlist->rl_rgrps; x++)
2630 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2631 kfree(rlist->rl_ghs);
2632 rlist->rl_ghs = NULL;
2633 }
2634}
2635
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/slab.h>
10#include <linux/spinlock.h>
11#include <linux/completion.h>
12#include <linux/buffer_head.h>
13#include <linux/fs.h>
14#include <linux/gfs2_ondisk.h>
15#include <linux/prefetch.h>
16#include <linux/blkdev.h>
17#include <linux/rbtree.h>
18#include <linux/random.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "glock.h"
23#include "glops.h"
24#include "lops.h"
25#include "meta_io.h"
26#include "quota.h"
27#include "rgrp.h"
28#include "super.h"
29#include "trans.h"
30#include "util.h"
31#include "log.h"
32#include "inode.h"
33#include "trace_gfs2.h"
34#include "dir.h"
35
36#define BFITNOENT ((u32)~0)
37#define NO_BLOCK ((u64)~0)
38
39struct gfs2_rbm {
40 struct gfs2_rgrpd *rgd;
41 u32 offset; /* The offset is bitmap relative */
42 int bii; /* Bitmap index */
43};
44
45static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
46{
47 return rbm->rgd->rd_bits + rbm->bii;
48}
49
50static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
51{
52 BUG_ON(rbm->offset >= rbm->rgd->rd_data);
53 return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
54 rbm->offset;
55}
56
57/*
58 * These routines are used by the resource group routines (rgrp.c)
59 * to keep track of block allocation. Each block is represented by two
60 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
61 *
62 * 0 = Free
63 * 1 = Used (not metadata)
64 * 2 = Unlinked (still in use) inode
65 * 3 = Used (metadata)
66 */
67
68struct gfs2_extent {
69 struct gfs2_rbm rbm;
70 u32 len;
71};
72
73static const char valid_change[16] = {
74 /* current */
75 /* n */ 0, 1, 1, 1,
76 /* e */ 1, 0, 0, 0,
77 /* w */ 0, 0, 0, 1,
78 1, 0, 0, 0
79};
80
81static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
82 struct gfs2_blkreserv *rs, bool nowrap);
83
84
85/**
86 * gfs2_setbit - Set a bit in the bitmaps
87 * @rbm: The position of the bit to set
88 * @do_clone: Also set the clone bitmap, if it exists
89 * @new_state: the new state of the block
90 *
91 */
92
93static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
94 unsigned char new_state)
95{
96 unsigned char *byte1, *byte2, *end, cur_state;
97 struct gfs2_bitmap *bi = rbm_bi(rbm);
98 unsigned int buflen = bi->bi_bytes;
99 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
100
101 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
102 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
103
104 BUG_ON(byte1 >= end);
105
106 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
107
108 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
109 struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
110
111 fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
112 rbm->offset, cur_state, new_state);
113 fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
114 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
115 (unsigned long long)bi->bi_bh->b_blocknr);
116 fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
117 bi->bi_offset, bi->bi_bytes,
118 (unsigned long long)gfs2_rbm_to_block(rbm));
119 dump_stack();
120 gfs2_consist_rgrpd(rbm->rgd);
121 return;
122 }
123 *byte1 ^= (cur_state ^ new_state) << bit;
124
125 if (do_clone && bi->bi_clone) {
126 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
127 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
128 *byte2 ^= (cur_state ^ new_state) << bit;
129 }
130}
131
132/**
133 * gfs2_testbit - test a bit in the bitmaps
134 * @rbm: The bit to test
135 * @use_clone: If true, test the clone bitmap, not the official bitmap.
136 *
137 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
138 * not the "real" bitmaps, to avoid allocating recently freed blocks.
139 *
140 * Returns: The two bit block state of the requested bit
141 */
142
143static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
144{
145 struct gfs2_bitmap *bi = rbm_bi(rbm);
146 const u8 *buffer;
147 const u8 *byte;
148 unsigned int bit;
149
150 if (use_clone && bi->bi_clone)
151 buffer = bi->bi_clone;
152 else
153 buffer = bi->bi_bh->b_data;
154 buffer += bi->bi_offset;
155 byte = buffer + (rbm->offset / GFS2_NBBY);
156 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
157
158 return (*byte >> bit) & GFS2_BIT_MASK;
159}
160
161/**
162 * gfs2_bit_search - search bitmap for a state
163 * @ptr: Pointer to bitmap data
164 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
165 * @state: The state we are searching for
166 *
167 * We xor the bitmap data with a pattern which is the bitwise opposite
168 * of what we are looking for. This gives rise to a pattern of ones
169 * wherever there is a match. Since we have two bits per entry, we
170 * take this pattern, shift it down by one place and then AND it with
171 * the original. All the even bit positions (0,2,4, etc) then represent
172 * successful matches, so we mask with 0x55555..... to remove the unwanted
173 * odd bit positions.
174 *
175 * This allows searching of a whole u64 at once (32 blocks) with a
176 * single test (on 64 bit arches).
177 */
178
179static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
180{
181 u64 tmp;
182 static const u64 search[] = {
183 [0] = 0xffffffffffffffffULL,
184 [1] = 0xaaaaaaaaaaaaaaaaULL,
185 [2] = 0x5555555555555555ULL,
186 [3] = 0x0000000000000000ULL,
187 };
188 tmp = le64_to_cpu(*ptr) ^ search[state];
189 tmp &= (tmp >> 1);
190 tmp &= mask;
191 return tmp;
192}
193
194/**
195 * rs_cmp - multi-block reservation range compare
196 * @start: start of the new reservation
197 * @len: number of blocks in the new reservation
198 * @rs: existing reservation to compare against
199 *
200 * returns: 1 if the block range is beyond the reach of the reservation
201 * -1 if the block range is before the start of the reservation
202 * 0 if the block range overlaps with the reservation
203 */
204static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
205{
206 if (start >= rs->rs_start + rs->rs_requested)
207 return 1;
208 if (rs->rs_start >= start + len)
209 return -1;
210 return 0;
211}
212
213/**
214 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
215 * a block in a given allocation state.
216 * @buf: the buffer that holds the bitmaps
217 * @len: the length (in bytes) of the buffer
218 * @goal: start search at this block's bit-pair (within @buf)
219 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
220 *
221 * Scope of @goal and returned block number is only within this bitmap buffer,
222 * not the entire rgrp or filesystem. @buf will be offset from the actual
223 * beginning of a bitmap block buffer, skipping any header structures, but
224 * headers are always a multiple of 64 bits long so that the buffer is
225 * always aligned to a 64 bit boundary.
226 *
227 * The size of the buffer is in bytes, but it is assumed that it is
228 * always OK to read a complete multiple of 64 bits at the end
229 * of the block in case the end is not aligned to a natural boundary.
230 *
231 * Return: the block number (bitmap buffer scope) that was found
232 */
233
234static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
235 u32 goal, u8 state)
236{
237 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
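	/* spoint is the bit offset of @goal within its 64-bit word (two bits per block) */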
238 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
239 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
240 u64 tmp;
241 u64 mask = 0x5555555555555555ULL;
242 u32 bit;
243
244 /* Mask off bits we don't care about at the start of the search */
245 mask <<= spoint;
246 tmp = gfs2_bit_search(ptr, mask, state);
247 ptr++;
248 while(tmp == 0 && ptr < end) {
249 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
250 ptr++;
251 }
252 /* Mask off any bits which are more than len bytes from the start */
253 if (ptr == end && (len & (sizeof(u64) - 1)))
254 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
255 /* Didn't find anything, so return */
256 if (tmp == 0)
257 return BFITNOENT;
258 ptr--;
259 bit = __ffs64(tmp);
260 bit /= 2; /* two bits per entry in the bitmap */
261 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
262}
263
264/**
265 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
266 * @rbm: The rbm with rgd already set correctly
267 * @block: The block number (filesystem relative)
268 *
269 * This sets the bii and offset members of an rbm based on a
270 * resource group and a filesystem relative block number. The
271 * resource group must be set in the rbm on entry, the bii and
272 * offset members will be set by this function.
273 *
274 * Returns: 0 on success, or an error code
275 */
276
277static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
278{
279 if (!rgrp_contains_block(rbm->rgd, block))
280 return -E2BIG;
281 rbm->bii = 0;
282 rbm->offset = block - rbm->rgd->rd_data0;
283 /* Check if the block is within the first block */
284 if (rbm->offset < rbm_bi(rbm)->bi_blocks)
285 return 0;
286
287 /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
288 rbm->offset += (sizeof(struct gfs2_rgrp) -
289 sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
290 rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
291 rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
292 return 0;
293}
294
295/**
296 * gfs2_rbm_add - add a number of blocks to an rbm
297 * @rbm: The rbm with rgd already set correctly
298 * @blocks: The number of blocks to add to rbm
299 *
300 * This function takes an existing rbm structure and adds a number of blocks to
301 * it.
302 *
303 * Returns: True if the new rbm would point past the end of the rgrp.
304 */
305
306static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
307{
308 struct gfs2_rgrpd *rgd = rbm->rgd;
309 struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii;
310
311 if (rbm->offset + blocks < bi->bi_blocks) {
312 rbm->offset += blocks;
313 return false;
314 }
315 blocks -= bi->bi_blocks - rbm->offset;
316
317 for(;;) {
318 bi++;
319 if (bi == rgd->rd_bits + rgd->rd_length)
320 return true;
321 if (blocks < bi->bi_blocks) {
322 rbm->offset = blocks;
323 rbm->bii = bi - rgd->rd_bits;
324 return false;
325 }
326 blocks -= bi->bi_blocks;
327 }
328}
329
330/**
331 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
332 * @rbm: Position to search (value/result)
333 * @n_unaligned: Number of unaligned blocks to check
334 * @len: Decremented for each block found (terminate on zero)
335 *
336 * Returns: true if a non-free block is encountered or the end of the resource
337 * group is reached.
338 */
339
340static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
341{
342 u32 n;
343 u8 res;
344
345 for (n = 0; n < n_unaligned; n++) {
346 res = gfs2_testbit(rbm, true);
347 if (res != GFS2_BLKST_FREE)
348 return true;
349 (*len)--;
350 if (*len == 0)
351 return true;
352 if (gfs2_rbm_add(rbm, 1))
353 return true;
354 }
355
356 return false;
357}
358
359/**
360 * gfs2_free_extlen - Return extent length of free blocks
361 * @rrbm: Starting position
362 * @len: Max length to check
363 *
364 * Starting at the block specified by the rbm, see how many free blocks
365 * there are, not reading more than len blocks ahead. This can be done
366 * using memchr_inv when the blocks are byte aligned, but has to be done
367 * on a block by block basis in case of unaligned blocks. Also this
368 * function can cope with bitmap boundaries (although it must stop on
369 * a resource group boundary)
370 *
371 * Returns: Number of free blocks in the extent
372 */
373
374static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
375{
376 struct gfs2_rbm rbm = *rrbm;
377 u32 n_unaligned = rbm.offset & 3;
378 u32 size = len;
379 u32 bytes;
380 u32 chunk_size;
381 u8 *ptr, *start, *end;
382 u64 block;
383 struct gfs2_bitmap *bi;
384
385 if (n_unaligned &&
386 gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
387 goto out;
388
389 n_unaligned = len & 3;
390 /* Start is now byte aligned */
391 while (len > 3) {
392 bi = rbm_bi(&rbm);
393 start = bi->bi_bh->b_data;
394 if (bi->bi_clone)
395 start = bi->bi_clone;
396 start += bi->bi_offset;
397 end = start + bi->bi_bytes;
398 BUG_ON(rbm.offset & 3);
399 start += (rbm.offset / GFS2_NBBY);
400 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
401 ptr = memchr_inv(start, 0, bytes);
402 chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
403 chunk_size *= GFS2_NBBY;
404 BUG_ON(len < chunk_size);
405 len -= chunk_size;
406 block = gfs2_rbm_to_block(&rbm);
407 if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
408 n_unaligned = 0;
409 break;
410 }
411 if (ptr) {
412 n_unaligned = 3;
413 break;
414 }
415 n_unaligned = len & 3;
416 }
417
418 /* Deal with any bits left over at the end */
419 if (n_unaligned)
420 gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
421out:
422 return size - len;
423}
424
425/**
426 * gfs2_bitcount - count the number of bits in a certain state
427 * @rgd: the resource group descriptor
428 * @buffer: the buffer that holds the bitmaps
429 * @buflen: the length (in bytes) of the buffer
430 * @state: the state of the block we're looking for
431 *
432 * Returns: The number of bits
433 */
434
435static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
436 unsigned int buflen, u8 state)
437{
438 const u8 *byte = buffer;
439 const u8 *end = buffer + buflen;
440 const u8 state1 = state << 2;
441 const u8 state2 = state << 4;
442 const u8 state3 = state << 6;
443 u32 count = 0;
444
445 for (; byte < end; byte++) {
446 if (((*byte) & 0x03) == state)
447 count++;
448 if (((*byte) & 0x0C) == state1)
449 count++;
450 if (((*byte) & 0x30) == state2)
451 count++;
452 if (((*byte) & 0xC0) == state3)
453 count++;
454 }
455
456 return count;
457}
458
459/**
460 * gfs2_rgrp_verify - Verify that a resource group is consistent
461 * @rgd: the rgrp
462 *
463 */
464
465void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
466{
467 struct gfs2_sbd *sdp = rgd->rd_sbd;
468 struct gfs2_bitmap *bi = NULL;
469 u32 length = rgd->rd_length;
470 u32 count[4], tmp;
471 int buf, x;
472
473 memset(count, 0, 4 * sizeof(u32));
474
475 /* Count # blocks in each of 4 possible allocation states */
476 for (buf = 0; buf < length; buf++) {
477 bi = rgd->rd_bits + buf;
478 for (x = 0; x < 4; x++)
479 count[x] += gfs2_bitcount(rgd,
480 bi->bi_bh->b_data +
481 bi->bi_offset,
482 bi->bi_bytes, x);
483 }
484
485 if (count[0] != rgd->rd_free) {
486 gfs2_lm(sdp, "free data mismatch: %u != %u\n",
487 count[0], rgd->rd_free);
488 gfs2_consist_rgrpd(rgd);
489 return;
490 }
491
492 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
493 if (count[1] != tmp) {
494 gfs2_lm(sdp, "used data mismatch: %u != %u\n",
495 count[1], tmp);
496 gfs2_consist_rgrpd(rgd);
497 return;
498 }
499
500 if (count[2] + count[3] != rgd->rd_dinodes) {
501 gfs2_lm(sdp, "used metadata mismatch: %u != %u\n",
502 count[2] + count[3], rgd->rd_dinodes);
503 gfs2_consist_rgrpd(rgd);
504 return;
505 }
506}
507
508/**
509 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
510 * @sdp: The GFS2 superblock
511 * @blk: The data block number
512 * @exact: True if this needs to be an exact match
513 *
514 * The @exact argument should be set to true by most callers. The exception
515 * is when we need to match blocks which are not represented by the rgrp
516 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
517 * there for alignment purposes. Another way of looking at it is that @exact
518 * matches only valid data/metadata blocks, but with @exact false, it will
519 * match any block within the extent of the rgrp.
520 *
521 * Returns: The resource group, or NULL if not found
522 */
523
524struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
525{
526 struct rb_node *n, *next;
527 struct gfs2_rgrpd *cur;
528
529 spin_lock(&sdp->sd_rindex_spin);
530 n = sdp->sd_rindex_tree.rb_node;
531 while (n) {
532 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
533 next = NULL;
534 if (blk < cur->rd_addr)
535 next = n->rb_left;
536 else if (blk >= cur->rd_data0 + cur->rd_data)
537 next = n->rb_right;
538 if (next == NULL) {
539 spin_unlock(&sdp->sd_rindex_spin);
540 if (exact) {
541 if (blk < cur->rd_addr)
542 return NULL;
543 if (blk >= cur->rd_data0 + cur->rd_data)
544 return NULL;
545 }
546 return cur;
547 }
548 n = next;
549 }
550 spin_unlock(&sdp->sd_rindex_spin);
551
552 return NULL;
553}
554
555/**
556 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
557 * @sdp: The GFS2 superblock
558 *
559 * Returns: The first rgrp in the filesystem
560 */
561
562struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
563{
564 const struct rb_node *n;
565 struct gfs2_rgrpd *rgd;
566
567 spin_lock(&sdp->sd_rindex_spin);
568 n = rb_first(&sdp->sd_rindex_tree);
569 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
570 spin_unlock(&sdp->sd_rindex_spin);
571
572 return rgd;
573}
574
575/**
576 * gfs2_rgrpd_get_next - get the next RG
577 * @rgd: the resource group descriptor
578 *
579 * Returns: The next rgrp
580 */
581
582struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
583{
584 struct gfs2_sbd *sdp = rgd->rd_sbd;
585 const struct rb_node *n;
586
587 spin_lock(&sdp->sd_rindex_spin);
588 n = rb_next(&rgd->rd_node);
589 if (n == NULL)
590 n = rb_first(&sdp->sd_rindex_tree);
591
592 if (unlikely(&rgd->rd_node == n)) {
593 spin_unlock(&sdp->sd_rindex_spin);
594 return NULL;
595 }
596 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
597 spin_unlock(&sdp->sd_rindex_spin);
598 return rgd;
599}
600
601void check_and_update_goal(struct gfs2_inode *ip)
602{
603 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
604 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
605 ip->i_goal = ip->i_no_addr;
606}
607
608void gfs2_free_clones(struct gfs2_rgrpd *rgd)
609{
610 int x;
611
612 for (x = 0; x < rgd->rd_length; x++) {
613 struct gfs2_bitmap *bi = rgd->rd_bits + x;
614 kfree(bi->bi_clone);
615 bi->bi_clone = NULL;
616 }
617}
618
619static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
620 const char *fs_id_buf)
621{
622 struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
623
624 gfs2_print_dbg(seq, "%s B: n:%llu s:%llu f:%u\n",
625 fs_id_buf,
626 (unsigned long long)ip->i_no_addr,
627 (unsigned long long)rs->rs_start,
628 rs->rs_requested);
629}
630
631/**
632 * __rs_deltree - remove a multi-block reservation from the rgd tree
633 * @rs: The reservation to remove
634 *
635 */
636static void __rs_deltree(struct gfs2_blkreserv *rs)
637{
638 struct gfs2_rgrpd *rgd;
639
640 if (!gfs2_rs_active(rs))
641 return;
642
643 rgd = rs->rs_rgd;
644 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
645 rb_erase(&rs->rs_node, &rgd->rd_rstree);
646 RB_CLEAR_NODE(&rs->rs_node);
647
648 if (rs->rs_requested) {
649 /* return requested blocks to the rgrp */
650 BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
651 rs->rs_rgd->rd_requested -= rs->rs_requested;
652
653 /* The rgrp extent failure point is likely not to increase;
654 it will only do so if the freed blocks are somehow
655 contiguous with a span of free blocks that follows. Still,
656 it will force the number to be recalculated later. */
657 rgd->rd_extfail_pt += rs->rs_requested;
658 rs->rs_requested = 0;
659 }
660}
661
662/**
663 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
664 * @rs: The reservation to remove
665 *
666 */
667void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
668{
669 struct gfs2_rgrpd *rgd;
670
671 rgd = rs->rs_rgd;
672 if (rgd) {
673 spin_lock(&rgd->rd_rsspin);
674 __rs_deltree(rs);
675 BUG_ON(rs->rs_requested);
676 spin_unlock(&rgd->rd_rsspin);
677 }
678}
679
680/**
681 * gfs2_rs_delete - delete a multi-block reservation
682 * @ip: The inode for this reservation
683 *
684 */
685void gfs2_rs_delete(struct gfs2_inode *ip)
686{
687 struct inode *inode = &ip->i_inode;
688
689 down_write(&ip->i_rw_mutex);
690 if (atomic_read(&inode->i_writecount) <= 1)
691 gfs2_rs_deltree(&ip->i_res);
692 up_write(&ip->i_rw_mutex);
693}
694
695/**
696 * return_all_reservations - return all reserved blocks back to the rgrp.
697 * @rgd: the rgrp that needs its space back
698 *
699 * We previously reserved a bunch of blocks for allocation. Now we need to
700 * give them back. This leaves the reservation structures intact, but removes
701 * all of their corresponding "no-fly zones".
702 */
703static void return_all_reservations(struct gfs2_rgrpd *rgd)
704{
705 struct rb_node *n;
706 struct gfs2_blkreserv *rs;
707
708 spin_lock(&rgd->rd_rsspin);
709 while ((n = rb_first(&rgd->rd_rstree))) {
710 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
711 __rs_deltree(rs);
712 }
713 spin_unlock(&rgd->rd_rsspin);
714}
715
716void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
717{
718 struct rb_node *n;
719 struct gfs2_rgrpd *rgd;
720 struct gfs2_glock *gl;
721
722 while ((n = rb_first(&sdp->sd_rindex_tree))) {
723 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
724 gl = rgd->rd_gl;
725
726 rb_erase(n, &sdp->sd_rindex_tree);
727
728 if (gl) {
729 if (gl->gl_state != LM_ST_UNLOCKED) {
730 gfs2_glock_cb(gl, LM_ST_UNLOCKED);
731 flush_delayed_work(&gl->gl_work);
732 }
733 gfs2_rgrp_brelse(rgd);
734 glock_clear_object(gl, rgd);
735 gfs2_glock_put(gl);
736 }
737
738 gfs2_free_clones(rgd);
739 return_all_reservations(rgd);
740 kfree(rgd->rd_bits);
741 rgd->rd_bits = NULL;
742 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
743 }
744}
745
746/**
747 * compute_bitstructs - Compute the bitmap sizes
748 * @rgd: The resource group descriptor
749 *
750 * Calculates bitmap descriptors, one for each block that contains bitmap data
751 *
752 * Returns: errno
753 */
754
755static int compute_bitstructs(struct gfs2_rgrpd *rgd)
756{
757 struct gfs2_sbd *sdp = rgd->rd_sbd;
758 struct gfs2_bitmap *bi;
759 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
760 u32 bytes_left, bytes;
761 int x;
762
763 if (!length)
764 return -EINVAL;
765
766 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
767 if (!rgd->rd_bits)
768 return -ENOMEM;
769
770 bytes_left = rgd->rd_bitbytes;
771
772 for (x = 0; x < length; x++) {
773 bi = rgd->rd_bits + x;
774
775 bi->bi_flags = 0;
776 /* small rgrp; bitmap stored completely in header block */
777 if (length == 1) {
778 bytes = bytes_left;
779 bi->bi_offset = sizeof(struct gfs2_rgrp);
780 bi->bi_start = 0;
781 bi->bi_bytes = bytes;
782 bi->bi_blocks = bytes * GFS2_NBBY;
783 /* header block */
784 } else if (x == 0) {
785 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
786 bi->bi_offset = sizeof(struct gfs2_rgrp);
787 bi->bi_start = 0;
788 bi->bi_bytes = bytes;
789 bi->bi_blocks = bytes * GFS2_NBBY;
790 /* last block */
791 } else if (x + 1 == length) {
792 bytes = bytes_left;
793 bi->bi_offset = sizeof(struct gfs2_meta_header);
794 bi->bi_start = rgd->rd_bitbytes - bytes_left;
795 bi->bi_bytes = bytes;
796 bi->bi_blocks = bytes * GFS2_NBBY;
797 /* other blocks */
798 } else {
799 bytes = sdp->sd_sb.sb_bsize -
800 sizeof(struct gfs2_meta_header);
801 bi->bi_offset = sizeof(struct gfs2_meta_header);
802 bi->bi_start = rgd->rd_bitbytes - bytes_left;
803 bi->bi_bytes = bytes;
804 bi->bi_blocks = bytes * GFS2_NBBY;
805 }
806
807 bytes_left -= bytes;
808 }
809
810 if (bytes_left) {
811 gfs2_consist_rgrpd(rgd);
812 return -EIO;
813 }
814 bi = rgd->rd_bits + (length - 1);
815 if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
816 gfs2_lm(sdp,
817 "ri_addr=%llu "
818 "ri_length=%u "
819 "ri_data0=%llu "
820 "ri_data=%u "
821 "ri_bitbytes=%u "
822 "start=%u len=%u offset=%u\n",
823 (unsigned long long)rgd->rd_addr,
824 rgd->rd_length,
825 (unsigned long long)rgd->rd_data0,
826 rgd->rd_data,
827 rgd->rd_bitbytes,
828 bi->bi_start, bi->bi_bytes, bi->bi_offset);
829 gfs2_consist_rgrpd(rgd);
830 return -EIO;
831 }
832
833 return 0;
834}
835
836/**
837 * gfs2_ri_total - Total up the file system space, according to the rindex.
838 * @sdp: the filesystem
839 *
840 */
841u64 gfs2_ri_total(struct gfs2_sbd *sdp)
842{
843 u64 total_data = 0;
844 struct inode *inode = sdp->sd_rindex;
845 struct gfs2_inode *ip = GFS2_I(inode);
846 char buf[sizeof(struct gfs2_rindex)];
847 int error, rgrps;
848
849 for (rgrps = 0;; rgrps++) {
850 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
851
852 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
853 break;
854 error = gfs2_internal_read(ip, buf, &pos,
855 sizeof(struct gfs2_rindex));
856 if (error != sizeof(struct gfs2_rindex))
857 break;
858 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
859 }
860 return total_data;
861}
862
863static int rgd_insert(struct gfs2_rgrpd *rgd)
864{
865 struct gfs2_sbd *sdp = rgd->rd_sbd;
866 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
867
868 /* Figure out where to put new node */
869 while (*newn) {
870 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
871 rd_node);
872
873 parent = *newn;
874 if (rgd->rd_addr < cur->rd_addr)
875 newn = &((*newn)->rb_left);
876 else if (rgd->rd_addr > cur->rd_addr)
877 newn = &((*newn)->rb_right);
878 else
879 return -EEXIST;
880 }
881
882 rb_link_node(&rgd->rd_node, parent, newn);
883 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
884 sdp->sd_rgrps++;
885 return 0;
886}
887
888/**
889 * read_rindex_entry - Pull in a new resource index entry from the disk
890 * @ip: Pointer to the rindex inode
891 *
892 * Returns: 0 on success, > 0 on EOF, error code otherwise
893 */
894
895static int read_rindex_entry(struct gfs2_inode *ip)
896{
897 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
898 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
899 struct gfs2_rindex buf;
900 int error;
901 struct gfs2_rgrpd *rgd;
902
903 if (pos >= i_size_read(&ip->i_inode))
904 return 1;
905
906 error = gfs2_internal_read(ip, (char *)&buf, &pos,
907 sizeof(struct gfs2_rindex));
908
909 if (error != sizeof(struct gfs2_rindex))
910 return (error == 0) ? 1 : error;
911
912 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
913 error = -ENOMEM;
914 if (!rgd)
915 return error;
916
917 rgd->rd_sbd = sdp;
918 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
919 rgd->rd_length = be32_to_cpu(buf.ri_length);
920 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
921 rgd->rd_data = be32_to_cpu(buf.ri_data);
922 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
923 spin_lock_init(&rgd->rd_rsspin);
924 mutex_init(&rgd->rd_mutex);
925
926 error = gfs2_glock_get(sdp, rgd->rd_addr,
927 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
928 if (error)
929 goto fail;
930
931 error = compute_bitstructs(rgd);
932 if (error)
933 goto fail_glock;
934
935 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
936 rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
937 if (rgd->rd_data > sdp->sd_max_rg_data)
938 sdp->sd_max_rg_data = rgd->rd_data;
939 spin_lock(&sdp->sd_rindex_spin);
940 error = rgd_insert(rgd);
941 spin_unlock(&sdp->sd_rindex_spin);
942 if (!error) {
943 glock_set_object(rgd->rd_gl, rgd);
944 return 0;
945 }
946
947 error = 0; /* someone else read in the rgrp; free it and ignore it */
948fail_glock:
949 gfs2_glock_put(rgd->rd_gl);
950
951fail:
952 kfree(rgd->rd_bits);
953 rgd->rd_bits = NULL;
954 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
955 return error;
956}
957
958/**
959 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
960 * @sdp: the GFS2 superblock
961 *
962 * The purpose of this function is to select a subset of the resource groups
963 * and mark them as PREFERRED. We do it in such a way that each node prefers
964 * to use a unique set of rgrps to minimize glock contention.
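 *
 * For example, with four journals the node using journal ID 1 ends up
 * preferring rgrps 1, 5, 9, ... while journal ID 2 prefers 2, 6, 10, ...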
965 */
966static void set_rgrp_preferences(struct gfs2_sbd *sdp)
967{
968 struct gfs2_rgrpd *rgd, *first;
969 int i;
970
971 /* Skip an initial number of rgrps, based on this node's journal ID.
972 That should start each node out on its own set. */
973 rgd = gfs2_rgrpd_get_first(sdp);
974 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
975 rgd = gfs2_rgrpd_get_next(rgd);
976 first = rgd;
977
978 do {
979 rgd->rd_flags |= GFS2_RDF_PREFERRED;
980 for (i = 0; i < sdp->sd_journals; i++) {
981 rgd = gfs2_rgrpd_get_next(rgd);
982 if (!rgd || rgd == first)
983 break;
984 }
985 } while (rgd && rgd != first);
986}
987
988/**
989 * gfs2_ri_update - Pull in a new resource index from the disk
990 * @ip: pointer to the rindex inode
991 *
992 * Returns: 0 on successful update, error code otherwise
993 */
994
995static int gfs2_ri_update(struct gfs2_inode *ip)
996{
997 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
998 int error;
999
1000 do {
1001 error = read_rindex_entry(ip);
1002 } while (error == 0);
1003
1004 if (error < 0)
1005 return error;
1006
1007 if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
1008 fs_err(sdp, "no resource groups found in the file system.\n");
1009 return -ENOENT;
1010 }
1011 set_rgrp_preferences(sdp);
1012
1013 sdp->sd_rindex_uptodate = 1;
1014 return 0;
1015}
1016
1017/**
1018 * gfs2_rindex_update - Update the rindex if required
1019 * @sdp: The GFS2 superblock
1020 *
1021 * We grab a lock on the rindex inode to make sure that it doesn't
1022 * change whilst we are performing an operation. We keep this lock
1023 * for quite long periods of time compared to other locks. This
1024 * doesn't matter, since it is shared and it is very, very rarely
1025 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1026 *
1027 * This makes sure that we're using the latest copy of the resource index
1028 * special file, which might have been updated if someone expanded the
1029 * filesystem (via gfs2_grow utility), which adds new resource groups.
1030 *
1031 * Returns: 0 on success, error code otherwise
1032 */
1033
1034int gfs2_rindex_update(struct gfs2_sbd *sdp)
1035{
1036 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1037 struct gfs2_glock *gl = ip->i_gl;
1038 struct gfs2_holder ri_gh;
1039 int error = 0;
1040 int unlock_required = 0;
1041
1042 /* Read new copy from disk if we don't have the latest */
1043 if (!sdp->sd_rindex_uptodate) {
1044 if (!gfs2_glock_is_locked_by_me(gl)) {
1045 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1046 if (error)
1047 return error;
1048 unlock_required = 1;
1049 }
1050 if (!sdp->sd_rindex_uptodate)
1051 error = gfs2_ri_update(ip);
1052 if (unlock_required)
1053 gfs2_glock_dq_uninit(&ri_gh);
1054 }
1055
1056 return error;
1057}
1058
1059static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1060{
1061 const struct gfs2_rgrp *str = buf;
1062 u32 rg_flags;
1063
1064 rg_flags = be32_to_cpu(str->rg_flags);
1065 rg_flags &= ~GFS2_RDF_MASK;
1066 rgd->rd_flags &= GFS2_RDF_MASK;
1067 rgd->rd_flags |= rg_flags;
1068 rgd->rd_free = be32_to_cpu(str->rg_free);
1069 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1070 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1071 /* rd_data0, rd_data and rd_bitbytes already set from rindex */
1072}
1073
1074static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1075{
1076 const struct gfs2_rgrp *str = buf;
1077
1078 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1079 rgl->rl_flags = str->rg_flags;
1080 rgl->rl_free = str->rg_free;
1081 rgl->rl_dinodes = str->rg_dinodes;
1082 rgl->rl_igeneration = str->rg_igeneration;
1083 rgl->__pad = 0UL;
1084}
1085
1086static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1087{
1088 struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
1089 struct gfs2_rgrp *str = buf;
1090 u32 crc;
1091
1092 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1093 str->rg_free = cpu_to_be32(rgd->rd_free);
1094 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1095 if (next == NULL)
1096 str->rg_skip = 0;
1097 else if (next->rd_addr > rgd->rd_addr)
1098 str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
1099 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1100 str->rg_data0 = cpu_to_be64(rgd->rd_data0);
1101 str->rg_data = cpu_to_be32(rgd->rd_data);
1102 str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
1103 str->rg_crc = 0;
1104 crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
1105 str->rg_crc = cpu_to_be32(crc);
1106
1107 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1108 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
1109}
1110
1111static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1112{
1113 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1114 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1115 struct gfs2_sbd *sdp = rgd->rd_sbd;
1116 int valid = 1;
1117
1118 if (rgl->rl_flags != str->rg_flags) {
1119 fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u",
1120 (unsigned long long)rgd->rd_addr,
1121 be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
1122 valid = 0;
1123 }
1124 if (rgl->rl_free != str->rg_free) {
1125 fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u",
1126 (unsigned long long)rgd->rd_addr,
1127 be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
1128 valid = 0;
1129 }
1130 if (rgl->rl_dinodes != str->rg_dinodes) {
1131 fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
1132 (unsigned long long)rgd->rd_addr,
1133 be32_to_cpu(rgl->rl_dinodes),
1134 be32_to_cpu(str->rg_dinodes));
1135 valid = 0;
1136 }
1137 if (rgl->rl_igeneration != str->rg_igeneration) {
1138 fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu",
1139 (unsigned long long)rgd->rd_addr,
1140 (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
1141 (unsigned long long)be64_to_cpu(str->rg_igeneration));
1142 valid = 0;
1143 }
1144 return valid;
1145}
1146
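/* Count the blocks marked GFS2_BLKST_UNLINKED in this rgrp's bitmaps */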
1147static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1148{
1149 struct gfs2_bitmap *bi;
1150 const u32 length = rgd->rd_length;
1151 const u8 *buffer = NULL;
1152 u32 i, goal, count = 0;
1153
1154 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1155 goal = 0;
1156 buffer = bi->bi_bh->b_data + bi->bi_offset;
1157 WARN_ON(!buffer_uptodate(bi->bi_bh));
1158 while (goal < bi->bi_blocks) {
1159 goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
1160 GFS2_BLKST_UNLINKED);
1161 if (goal == BFITNOENT)
1162 break;
1163 count++;
1164 goal++;
1165 }
1166 }
1167
1168 return count;
1169}
1170
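/* Set or clear GBF_FULL on every bitmap depending on whether the rgrp has any free blocks */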
1171static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
1172{
1173 struct gfs2_bitmap *bi;
1174 int x;
1175
1176 if (rgd->rd_free) {
1177 for (x = 0; x < rgd->rd_length; x++) {
1178 bi = rgd->rd_bits + x;
1179 clear_bit(GBF_FULL, &bi->bi_flags);
1180 }
1181 } else {
1182 for (x = 0; x < rgd->rd_length; x++) {
1183 bi = rgd->rd_bits + x;
1184 set_bit(GBF_FULL, &bi->bi_flags);
1185 }
1186 }
1187}
1188
1189/**
1190 * gfs2_rgrp_go_instantiate - Read in a RG's header and bitmaps
1191 * @gl: the glock representing the rgrpd to read in
1192 *
1193 * Read in all of a Resource Group's header and bitmap blocks.
1194 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1195 *
1196 * Returns: errno
1197 */
1198
1199int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl)
1200{
1201 struct gfs2_rgrpd *rgd = gl->gl_object;
1202 struct gfs2_sbd *sdp = rgd->rd_sbd;
1203 unsigned int length = rgd->rd_length;
1204 struct gfs2_bitmap *bi;
1205 unsigned int x, y;
1206 int error;
1207
1208 if (rgd->rd_bits[0].bi_bh != NULL)
1209 return 0;
1210
1211 for (x = 0; x < length; x++) {
1212 bi = rgd->rd_bits + x;
1213 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1214 if (error)
1215 goto fail;
1216 }
1217
1218 for (y = length; y--;) {
1219 bi = rgd->rd_bits + y;
1220 error = gfs2_meta_wait(sdp, bi->bi_bh);
1221 if (error)
1222 goto fail;
1223 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1224 GFS2_METATYPE_RG)) {
1225 error = -EIO;
1226 goto fail;
1227 }
1228 }
1229
1230 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1231 rgrp_set_bitmap_flags(rgd);
1232 rgd->rd_flags |= GFS2_RDF_CHECK;
1233 rgd->rd_free_clone = rgd->rd_free;
1234 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
1235 /* max out the rgrp allocation failure point */
1236 rgd->rd_extfail_pt = rgd->rd_free;
1237 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1238 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1239 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1240 rgd->rd_bits[0].bi_bh->b_data);
1241 } else if (sdp->sd_args.ar_rgrplvb) {
1242 if (!gfs2_rgrp_lvb_valid(rgd)){
1243 gfs2_consist_rgrpd(rgd);
1244 error = -EIO;
1245 goto fail;
1246 }
1247 if (rgd->rd_rgl->rl_unlinked == 0)
1248 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1249 }
1250 return 0;
1251
1252fail:
1253 while (x--) {
1254 bi = rgd->rd_bits + x;
1255 brelse(bi->bi_bh);
1256 bi->bi_bh = NULL;
1257 gfs2_assert_warn(sdp, !bi->bi_clone);
1258 }
1259 return error;
1260}
1261
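/*
 * With the "rgrplvb" mount option, the interesting parts of the resource
 * group header (flags, free block count, dinode count, inode generation)
 * can be taken from the glock's lock value block instead of reading the
 * header and bitmaps from disk.  This is only a fast path: if the LVB has
 * not been initialised (wrong magic number), fall back to a full
 * gfs2_instantiate().
 */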
1262static int update_rgrp_lvb(struct gfs2_rgrpd *rgd, struct gfs2_holder *gh)
1263{
1264 u32 rl_flags;
1265
1266 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gh->gh_gl->gl_flags))
1267 return 0;
1268
1269 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1270 return gfs2_instantiate(gh);
1271
1272 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1273 rl_flags &= ~GFS2_RDF_MASK;
1274 rgd->rd_flags &= GFS2_RDF_MASK;
1275 rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
1276 if (rgd->rd_rgl->rl_unlinked == 0)
1277 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1278 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1279 rgrp_set_bitmap_flags(rgd);
1280 rgd->rd_free_clone = rgd->rd_free;
1281 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
1282 /* max out the rgrp allocation failure point */
1283 rgd->rd_extfail_pt = rgd->rd_free;
1284 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1285 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1286 return 0;
1287}
1288
1289/**
1290 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_go_instantiate()
1291 * @rgd: The resource group
1292 *
1293 */
1294
1295void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1296{
1297 int x, length = rgd->rd_length;
1298
1299 for (x = 0; x < length; x++) {
1300 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1301 if (bi->bi_bh) {
1302 brelse(bi->bi_bh);
1303 bi->bi_bh = NULL;
1304 }
1305 }
1306 set_bit(GLF_INSTANTIATE_NEEDED, &rgd->rd_gl->gl_flags);
1307}
1308
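/**
 * gfs2_rgrp_send_discards - issue discard requests for a bitmap's free blocks
 * @sdp: the filesystem
 * @offset: the first data block of the resource group
 * @bh: a bitmap buffer to compare against the clone bitmap, or NULL
 * @bi: the bitmap to scan
 * @minlen: only discard extents of at least this many blocks
 * @ptrimmed: if not NULL, used to return the number of blocks discarded
 *
 * Each bitmap byte describes GFS2_NBBY (4) blocks, two bits per block, and
 * a block is free when its bit pair is 00.  For a byte x, the expression
 * ~(x | (x >> 1)) & 0x55 sets the low bit of each pair whose block is free;
 * for example, x = 0x24 (binary 00 10 01 00) gives 0x41, i.e. blocks 0 and 3
 * described by that byte are free.  When @bh is supplied, the result is also
 * masked with (clone | clone >> 1), so that only blocks which are free in
 * @bh but still in use in the clone bitmap (i.e. blocks which have just been
 * freed) are discarded.
 *
 * Returns: 0 on success or a negative errno; on failure, discards are
 * disabled for the rest of this mount.
 */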
1309int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1310 struct buffer_head *bh,
1311 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1312{
1313 struct super_block *sb = sdp->sd_vfs;
1314 u64 blk;
1315 sector_t start = 0;
1316 sector_t nr_blks = 0;
1317 int rv = -EIO;
1318 unsigned int x;
1319 u32 trimmed = 0;
1320 u8 diff;
1321
1322 for (x = 0; x < bi->bi_bytes; x++) {
1323 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1324 clone += bi->bi_offset;
1325 clone += x;
1326 if (bh) {
1327 const u8 *orig = bh->b_data + bi->bi_offset + x;
1328 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1329 } else {
1330 diff = ~(*clone | (*clone >> 1));
1331 }
1332 diff &= 0x55;
1333 if (diff == 0)
1334 continue;
1335 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1336 while(diff) {
1337 if (diff & 1) {
1338 if (nr_blks == 0)
1339 goto start_new_extent;
1340 if ((start + nr_blks) != blk) {
1341 if (nr_blks >= minlen) {
1342 rv = sb_issue_discard(sb,
1343 start, nr_blks,
1344 GFP_NOFS, 0);
1345 if (rv)
1346 goto fail;
1347 trimmed += nr_blks;
1348 }
1349 nr_blks = 0;
1350start_new_extent:
1351 start = blk;
1352 }
1353 nr_blks++;
1354 }
1355 diff >>= 2;
1356 blk++;
1357 }
1358 }
1359 if (nr_blks >= minlen) {
1360 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1361 if (rv)
1362 goto fail;
1363 trimmed += nr_blks;
1364 }
1365 if (ptrimmed)
1366 *ptrimmed = trimmed;
1367 return 0;
1368
1369fail:
1370 if (sdp->sd_args.ar_discard)
1371 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
1372 sdp->sd_args.ar_discard = 0;
1373 return rv;
1374}
1375
1376/**
1377 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1378 * @filp: Any file on the filesystem
1379 * @argp: Pointer to the arguments (also used to pass result)
1380 *
1381 * Returns: 0 on success, otherwise error code
1382 */
1383
1384int gfs2_fitrim(struct file *filp, void __user *argp)
1385{
1386 struct inode *inode = file_inode(filp);
1387 struct gfs2_sbd *sdp = GFS2_SB(inode);
1388 struct block_device *bdev = sdp->sd_vfs->s_bdev;
1389 struct buffer_head *bh;
1390 struct gfs2_rgrpd *rgd;
1391 struct gfs2_rgrpd *rgd_end;
1392 struct gfs2_holder gh;
1393 struct fstrim_range r;
1394 int ret = 0;
1395 u64 amt;
1396 u64 trimmed = 0;
1397 u64 start, end, minlen;
1398 unsigned int x;
1399 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1400
1401 if (!capable(CAP_SYS_ADMIN))
1402 return -EPERM;
1403
1404 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1405 return -EROFS;
1406
1407 if (!bdev_max_discard_sectors(bdev))
1408 return -EOPNOTSUPP;
1409
1410 if (copy_from_user(&r, argp, sizeof(r)))
1411 return -EFAULT;
1412
1413 ret = gfs2_rindex_update(sdp);
1414 if (ret)
1415 return ret;
1416
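 /*
  * The fstrim range arrives in bytes; convert it into filesystem blocks.
  * Never use a minimum extent length smaller than one filesystem block or
  * smaller than the device's discard granularity.
  */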
1417 start = r.start >> bs_shift;
1418 end = start + (r.len >> bs_shift);
1419 minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
1420 minlen = max_t(u64, minlen, bdev_discard_granularity(bdev)) >> bs_shift;
1421
1422 if (end <= start || minlen > sdp->sd_max_rg_data)
1423 return -EINVAL;
1424
1425 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1426 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1427
1428 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1429 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1430 return -EINVAL; /* start is beyond the end of the fs */
1431
1432 while (1) {
1433
1434 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1435 LM_FLAG_NODE_SCOPE, &gh);
1436 if (ret)
1437 goto out;
1438
1439 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1440 /* Trim each bitmap in the rgrp */
1441 for (x = 0; x < rgd->rd_length; x++) {
1442 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1443 rgrp_lock_local(rgd);
1444 ret = gfs2_rgrp_send_discards(sdp,
1445 rgd->rd_data0, NULL, bi, minlen,
1446 &amt);
1447 rgrp_unlock_local(rgd);
1448 if (ret) {
1449 gfs2_glock_dq_uninit(&gh);
1450 goto out;
1451 }
1452 trimmed += amt;
1453 }
1454
1455 /* Mark rgrp as having been trimmed */
1456 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1457 if (ret == 0) {
1458 bh = rgd->rd_bits[0].bi_bh;
1459 rgrp_lock_local(rgd);
1460 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1461 gfs2_trans_add_meta(rgd->rd_gl, bh);
1462 gfs2_rgrp_out(rgd, bh->b_data);
1463 rgrp_unlock_local(rgd);
1464 gfs2_trans_end(sdp);
1465 }
1466 }
1467 gfs2_glock_dq_uninit(&gh);
1468
1469 if (rgd == rgd_end)
1470 break;
1471
1472 rgd = gfs2_rgrpd_get_next(rgd);
1473 }
1474
1475out:
1476 r.len = trimmed << bs_shift;
1477 if (copy_to_user(argp, &r, sizeof(r)))
1478 return -EFAULT;
1479
1480 return ret;
1481}
1482
1483/**
1484 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1485 * @ip: the inode structure
1486 *
1487 */
1488static void rs_insert(struct gfs2_inode *ip)
1489{
1490 struct rb_node **newn, *parent = NULL;
1491 int rc;
1492 struct gfs2_blkreserv *rs = &ip->i_res;
1493 struct gfs2_rgrpd *rgd = rs->rs_rgd;
1494
1495 BUG_ON(gfs2_rs_active(rs));
1496
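 /*
  * Insert the reservation into the rgrp's rb-tree, which is ordered by
  * starting block (rs_cmp() compares block ranges).  Overlapping
  * reservations must never occur; if rs_cmp() reports an overlap, warn
  * and bail out rather than corrupt the tree.
  */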
1497 spin_lock(&rgd->rd_rsspin);
1498 newn = &rgd->rd_rstree.rb_node;
1499 while (*newn) {
1500 struct gfs2_blkreserv *cur =
1501 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1502
1503 parent = *newn;
1504 rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
1505 if (rc > 0)
1506 newn = &((*newn)->rb_right);
1507 else if (rc < 0)
1508 newn = &((*newn)->rb_left);
1509 else {
1510 spin_unlock(&rgd->rd_rsspin);
1511 WARN_ON(1);
1512 return;
1513 }
1514 }
1515
1516 rb_link_node(&rs->rs_node, parent, newn);
1517 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1518
1519 /* Do our rgrp accounting for the reservation */
1520 rgd->rd_requested += rs->rs_requested; /* blocks requested */
1521 spin_unlock(&rgd->rd_rsspin);
1522 trace_gfs2_rs(rs, TRACE_RS_INSERT);
1523}
1524
1525/**
1526 * rgd_free - return the number of free blocks we can allocate
1527 * @rgd: the resource group
1528 * @rs: Our own reservation, which is excluded from the reserved count
1529 *
1530 * This function returns the number of free blocks for an rgrp.
1531 * That's the clone-free blocks (blocks that are free, not including those
1532 * still being used for unlinked files that haven't been deleted.)
1533 *
1534 * It also subtracts any blocks reserved by someone else, but does not
1535 * include free blocks that are still part of our current reservation,
1536 * because obviously we can (and will) allocate them.
1537 */
1538static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
1539{
1540 u32 tot_reserved, tot_free;
1541
1542 if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
1543 return 0;
1544 tot_reserved = rgd->rd_requested - rs->rs_requested;
1545
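 /* Don't let the subtraction below underflow. */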
1546 if (rgd->rd_free_clone < tot_reserved)
1547 tot_reserved = 0;
1548
1549 tot_free = rgd->rd_free_clone - tot_reserved;
1550
1551 return tot_free;
1552}
1553
1554/**
1555 * rg_mblk_search - find a group of multiple free blocks to form a reservation
1556 * @rgd: the resource group descriptor
1557 * @ip: pointer to the inode for which we're reserving blocks
1558 * @ap: the allocation parameters
1559 *
1560 */
1561
1562static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1563 const struct gfs2_alloc_parms *ap)
1564{
1565 struct gfs2_rbm rbm = { .rgd = rgd, };
1566 u64 goal;
1567 struct gfs2_blkreserv *rs = &ip->i_res;
1568 u32 extlen;
1569 u32 free_blocks, blocks_available;
1570 int ret;
1571 struct inode *inode = &ip->i_inode;
1572
1573 spin_lock(&rgd->rd_rsspin);
1574 free_blocks = rgd_free(rgd, rs);
1575 if (rgd->rd_free_clone < rgd->rd_requested)
1576 free_blocks = 0;
1577 blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
1578 if (rgd == rs->rs_rgd)
1579 blocks_available += rs->rs_reserved;
1580 spin_unlock(&rgd->rd_rsspin);
1581
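 /*
  * Size the reservation: a directory grows one block at a time, so a
  * single block is enough.  For anything else, reserve the larger of the
  * inode's size hint and the current allocation target, clamped to a
  * sensible minimum and to what this rgrp can actually provide.
  */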
1582 if (S_ISDIR(inode->i_mode))
1583 extlen = 1;
1584 else {
1585 extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
1586 extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
1587 }
1588 if (free_blocks < extlen || blocks_available < extlen)
1589 return;
1590
1591 /* Find bitmap block that contains bits for goal block */
1592 if (rgrp_contains_block(rgd, ip->i_goal))
1593 goal = ip->i_goal;
1594 else
1595 goal = rgd->rd_last_alloc + rgd->rd_data0;
1596
1597 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1598 return;
1599
1600 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
1601 if (ret == 0) {
1602 rs->rs_start = gfs2_rbm_to_block(&rbm);
1603 rs->rs_requested = extlen;
1604 rs_insert(ip);
1605 } else {
1606 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1607 rgd->rd_last_alloc = 0;
1608 }
1609}
1610
1611/**
1612 * gfs2_next_unreserved_block - Return next block that is not reserved
1613 * @rgd: The resource group
1614 * @block: The starting block
1615 * @length: The required length
1616 * @ignore_rs: Reservation to ignore
1617 *
1618 * If the block does not appear in any reservation, then return the
1619 * block number unchanged. If it does appear in the reservation, then
1620 * keep looking through the tree of reservations in order to find the
1621 * first block number which is not reserved.
1622 */
1623
1624static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1625 u32 length,
1626 struct gfs2_blkreserv *ignore_rs)
1627{
1628 struct gfs2_blkreserv *rs;
1629 struct rb_node *n;
1630 int rc;
1631
1632 spin_lock(&rgd->rd_rsspin);
1633 n = rgd->rd_rstree.rb_node;
1634 while (n) {
1635 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1636 rc = rs_cmp(block, length, rs);
1637 if (rc < 0)
1638 n = n->rb_left;
1639 else if (rc > 0)
1640 n = n->rb_right;
1641 else
1642 break;
1643 }
1644
1645 if (n) {
1646 while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
1647 block = rs->rs_start + rs->rs_requested;
1648 n = n->rb_right;
1649 if (n == NULL)
1650 break;
1651 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1652 }
1653 }
1654
1655 spin_unlock(&rgd->rd_rsspin);
1656 return block;
1657}
1658
1659/**
1660 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1661 * @rbm: The current position in the resource group
1662 * @rs: Our own reservation
1663 * @minext: The minimum extent length
1664 * @maxext: A pointer to the maximum extent structure
1665 *
1666 * This checks the current position in the rgrp to see whether there is
1667 * a reservation covering this block. If not then this function is a
1668 * no-op. If there is, then the position is moved to the end of the
1669 * contiguous reservation(s) so that we are pointing at the first
1670 * non-reserved block.
1671 *
1672 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1673 */
1674
1675static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1676 struct gfs2_blkreserv *rs,
1677 u32 minext,
1678 struct gfs2_extent *maxext)
1679{
1680 u64 block = gfs2_rbm_to_block(rbm);
1681 u32 extlen = 1;
1682 u64 nblock;
1683
1684 /*
1685 * If we have a minimum extent length, then skip over any extent
1686 * which is less than the min extent length in size.
1687 */
1688 if (minext > 1) {
1689 extlen = gfs2_free_extlen(rbm, minext);
1690 if (extlen <= maxext->len)
1691 goto fail;
1692 }
1693
1694 /*
1695 * Check the extent which has been found against the reservations
1696 * and skip if parts of it are already reserved
1697 */
1698 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs);
1699 if (nblock == block) {
1700 if (!minext || extlen >= minext)
1701 return 0;
1702
1703 if (extlen > maxext->len) {
1704 maxext->len = extlen;
1705 maxext->rbm = *rbm;
1706 }
1707 } else {
1708 u64 len = nblock - block;
1709 if (len >= (u64)1 << 32)
1710 return -E2BIG;
1711 extlen = len;
1712 }
1713fail:
1714 if (gfs2_rbm_add(rbm, extlen))
1715 return -E2BIG;
1716 return 1;
1717}
1718
1719/**
1720 * gfs2_rbm_find - Look for blocks of a particular state
1721 * @rbm: Value/result starting position and final position
1722 * @state: The state which we want to find
1723 * @minext: Pointer to the requested extent length
1724 * This is updated to be the actual reservation size.
1725 * @rs: Our own reservation (NULL to skip checking for reservations)
1726 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1727 * around until we've reached the starting point.
1728 *
1729 * Side effects:
1730 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1731 * has no free blocks in it.
1732 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1733 * has come up short on a free block search.
1734 *
1735 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1736 */
1737
1738static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1739 struct gfs2_blkreserv *rs, bool nowrap)
1740{
1741 bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
1742 struct buffer_head *bh;
1743 int last_bii;
1744 u32 offset;
1745 u8 *buffer;
1746 bool wrapped = false;
1747 int ret;
1748 struct gfs2_bitmap *bi;
1749 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1750
1751 /*
1752 * Determine the last bitmap to search. If we're not starting at the
1753 * beginning of a bitmap, we need to search that bitmap twice to scan
1754 * the entire resource group.
1755 */
1756 last_bii = rbm->bii - (rbm->offset == 0);
1757
1758 while(1) {
1759 bi = rbm_bi(rbm);
1760 if (test_bit(GBF_FULL, &bi->bi_flags) &&
1761 (state == GFS2_BLKST_FREE))
1762 goto next_bitmap;
1763
1764 bh = bi->bi_bh;
1765 buffer = bh->b_data + bi->bi_offset;
1766 WARN_ON(!buffer_uptodate(bh));
1767 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1768 buffer = bi->bi_clone + bi->bi_offset;
1769 offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
1770 if (offset == BFITNOENT) {
1771 if (state == GFS2_BLKST_FREE && rbm->offset == 0)
1772 set_bit(GBF_FULL, &bi->bi_flags);
1773 goto next_bitmap;
1774 }
1775 rbm->offset = offset;
1776 if (!rs || !minext)
1777 return 0;
1778
1779 ret = gfs2_reservation_check_and_update(rbm, rs, *minext,
1780 &maxext);
1781 if (ret == 0)
1782 return 0;
1783 if (ret > 0)
1784 goto next_iter;
1785 if (ret == -E2BIG) {
1786 rbm->bii = 0;
1787 rbm->offset = 0;
1788 goto res_covered_end_of_rgrp;
1789 }
1790 return ret;
1791
1792next_bitmap: /* Find next bitmap in the rgrp */
1793 rbm->offset = 0;
1794 rbm->bii++;
1795 if (rbm->bii == rbm->rgd->rd_length)
1796 rbm->bii = 0;
1797res_covered_end_of_rgrp:
1798 if (rbm->bii == 0) {
1799 if (wrapped)
1800 break;
1801 wrapped = true;
1802 if (nowrap)
1803 break;
1804 }
1805next_iter:
1806 /* Have we scanned the entire resource group? */
1807 if (wrapped && rbm->bii > last_bii)
1808 break;
1809 }
1810
1811 if (state != GFS2_BLKST_FREE)
1812 return -ENOSPC;
1813
1814 /* If the extent was too small, and it's smaller than the smallest
1815 to have failed before, remember for future reference that it's
1816 useless to search this rgrp again for this amount or more. */
1817 if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
1818 *minext < rbm->rgd->rd_extfail_pt)
1819 rbm->rgd->rd_extfail_pt = *minext - 1;
1820
1821 /* If the maximum extent we found is big enough to fulfill the
1822 minimum requirements, use it anyway. */
1823 if (maxext.len) {
1824 *rbm = maxext.rbm;
1825 *minext = maxext.len;
1826 return 0;
1827 }
1828
1829 return -ENOSPC;
1830}
1831
1832/**
1833 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1834 * @rgd: The rgrp
1835 * @last_unlinked: block address of the last unlinked dinode found, updated here
1836 * @skip: block address we should explicitly not unlink
1837 *
1838 * Any unlinked, unused dinodes found are queued up so that their
1839 * deletion can be verified and completed.
1840 */
1841
1842static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1843{
1844 u64 block;
1845 struct gfs2_sbd *sdp = rgd->rd_sbd;
1846 struct gfs2_glock *gl;
1847 struct gfs2_inode *ip;
1848 int error;
1849 int found = 0;
1850 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1851
1852 while (1) {
1853 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1854 true);
1855 if (error == -ENOSPC)
1856 break;
1857 if (WARN_ON_ONCE(error))
1858 break;
1859
1860 block = gfs2_rbm_to_block(&rbm);
1861 if (gfs2_rbm_from_block(&rbm, block + 1))
1862 break;
1863 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1864 continue;
1865 if (block == skip)
1866 continue;
1867 *last_unlinked = block;
1868
1869 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1870 if (error)
1871 continue;
1872
1873 /* If the inode is already in cache, we can ignore it here
1874 * because the existing inode disposal code will deal with
1875 * it when all refs have gone away. Accessing gl_object like
1876 * this is not safe in general. Here it is ok because we do
1877 * not dereference the pointer, and we only need an approx
1878 * answer to whether it is NULL or not.
1879 */
1880 ip = gl->gl_object;
1881
1882 if (ip || !gfs2_queue_verify_delete(gl, false))
1883 gfs2_glock_put(gl);
1884 else
1885 found++;
1886
1887 /* Limit reclaim to sensible number of tasks */
1888 if (found > NR_CPUS)
1889 return;
1890 }
1891
1892 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1893 return;
1894}
1895
1896/**
1897 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1898 * @rgd: The rgrp in question
1899 * @loops: An indication of how picky we can be (0=very, 1=less so)
1900 *
1901 * This function uses the recently added glock statistics in order to
1902 * figure out whether a particular resource group is suffering from
1903 * contention from multiple nodes. This is done purely on the basis
1904 * of timings, since this is the only data we have to work with and
1905 * our aim here is to reject a resource group which is highly contended
1906 * but (very important) not to do this too often in order to ensure that
1907 * we do not end up introducing fragmentation by changing resource
1908 * groups when not actually required.
1909 *
1910 * The calculation is fairly simple, we want to know whether the SRTTB
1911 * (i.e. smoothed round trip time for blocking operations) to acquire
1912 * the lock for this rgrp's glock is significantly greater than the
1913 * time taken for resource groups on average. We introduce a margin in
1914 * the form of the variable @var which is computed as the sum of the two
1915 * respective variances, and multiplied by a factor depending on @loops
1916 * and whether we have a lot of data to base the decision on. This is
1917 * then tested against the squared difference of the means in order to
1918 * decide whether the result is statistically significant or not.
1919 *
1920 * Returns: A boolean verdict on the congestion status
1921 */
1922
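/*
 * A sketch of the statistical test below, in terms of the local variables:
 *
 *   a_srttb = mean blocking SRTT over all CPUs' rgrp glock stats
 *   l_srttb = blocking SRTT of this rgrp's glock
 *   var     = (this CPU's rgrp SRTT variance + this glock's SRTT variance)
 *             doubled, doubled again when there are few samples, and once
 *             more on the second pass (loops == 1)
 *
 *   congested  <=>  (l_srttb > a_srttb) && ((l_srttb - a_srttb)^2 > var)
 */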
1923static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1924{
1925 const struct gfs2_glock *gl = rgd->rd_gl;
1926 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1927 struct gfs2_lkstats *st;
1928 u64 r_dcount, l_dcount;
1929 u64 l_srttb, a_srttb = 0;
1930 s64 srttb_diff;
1931 u64 sqr_diff;
1932 u64 var;
1933 int cpu, nonzero = 0;
1934
1935 preempt_disable();
1936 for_each_present_cpu(cpu) {
1937 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1938 if (st->stats[GFS2_LKS_SRTTB]) {
1939 a_srttb += st->stats[GFS2_LKS_SRTTB];
1940 nonzero++;
1941 }
1942 }
1943 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1944 if (nonzero)
1945 do_div(a_srttb, nonzero);
1946 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1947 var = st->stats[GFS2_LKS_SRTTVARB] +
1948 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1949 preempt_enable();
1950
1951 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1952 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1953
1954 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1955 return false;
1956
1957 srttb_diff = a_srttb - l_srttb;
1958 sqr_diff = srttb_diff * srttb_diff;
1959
1960 var *= 2;
1961 if (l_dcount < 8 || r_dcount < 8)
1962 var *= 2;
1963 if (loops == 1)
1964 var *= 2;
1965
1966 return ((srttb_diff < 0) && (sqr_diff > var));
1967}
1968
1969/**
1970 * gfs2_rgrp_used_recently - test if an rgrp has been used recently
1971 * @rs: The block reservation with the rgrp to test
1972 * @msecs: The time limit in milliseconds
1973 *
1974 * Returns: True if the rgrp glock has been used within the time limit
1975 */
1976static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1977 u64 msecs)
1978{
1979 u64 tdiff;
1980
1981 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1982 rs->rs_rgd->rd_gl->gl_dstamp));
1983
1984 return tdiff > (msecs * 1000 * 1000);
1985}
1986
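/*
 * Pick a random number of rgrps to skip.  Used for new directories (via
 * GFS2_AF_ORLOV in gfs2_inplace_reserve()) to spread them across the
 * filesystem rather than clustering them around their parent, in the
 * spirit of the Orlov allocator used by other filesystems.
 */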
1987static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1988{
1989 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1990
1991 return get_random_u32() % sdp->sd_rgrps;
1992}
1993
1994static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1995{
1996 struct gfs2_rgrpd *rgd = *pos;
1997 struct gfs2_sbd *sdp = rgd->rd_sbd;
1998
1999 rgd = gfs2_rgrpd_get_next(rgd);
2000 if (rgd == NULL)
2001 rgd = gfs2_rgrpd_get_first(sdp);
2002 *pos = rgd;
2003 if (rgd != begin) /* If we didn't wrap */
2004 return true;
2005 return false;
2006}
2007
2008/**
2009 * fast_to_acquire - determine if a resource group will be fast to acquire
2010 * @rgd: The rgrp
2011 *
2012 * If this is one of our preferred rgrps, it should be quicker to acquire,
2013 * because we tried to set ourselves up as dlm lock master.
2014 */
2015static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
2016{
2017 struct gfs2_glock *gl = rgd->rd_gl;
2018
2019 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
2020 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
2021 !test_bit(GLF_DEMOTE, &gl->gl_flags))
2022 return 1;
2023 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
2024 return 1;
2025 return 0;
2026}
2027
2028/**
2029 * gfs2_inplace_reserve - Reserve space in the filesystem
2030 * @ip: the inode to reserve space for
2031 * @ap: the allocation parameters
2032 *
2033 * We try our best to find an rgrp that has at least ap->target blocks
2034 * available. After a couple of passes (loops == 2), the prospects of finding
2035 * such an rgrp diminish. At this stage, we return the first rgrp that has
2036 * at least ap->min_target blocks available.
2037 *
2038 * Returns: 0 on success,
2039 * -ENOSPC if a suitable rgrp can't be found
2040 * errno otherwise
2041 */
2042
2043int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
2044{
2045 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2046 struct gfs2_rgrpd *begin = NULL;
2047 struct gfs2_blkreserv *rs = &ip->i_res;
2048 int error = 0, flags = LM_FLAG_NODE_SCOPE;
2049 bool rg_locked;
2050 u64 last_unlinked = NO_BLOCK;
2051 u32 target = ap->target;
2052 int loops = 0;
2053 u32 free_blocks, blocks_available, skip = 0;
2054
2055 BUG_ON(rs->rs_reserved);
2056
2057 if (sdp->sd_args.ar_rgrplvb)
2058 flags |= GL_SKIP;
2059 if (gfs2_assert_warn(sdp, target))
2060 return -EINVAL;
2061 if (gfs2_rs_active(rs)) {
2062 begin = rs->rs_rgd;
2063 } else if (rs->rs_rgd &&
2064 rgrp_contains_block(rs->rs_rgd, ip->i_goal)) {
2065 begin = rs->rs_rgd;
2066 } else {
2067 check_and_update_goal(ip);
2068 rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
2069 }
2070 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
2071 skip = gfs2_orlov_skip(ip);
2072 if (rs->rs_rgd == NULL)
2073 return -EBADSLT;
2074
2075 while (loops < 3) {
2076 struct gfs2_rgrpd *rgd;
2077
2078 rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl);
2079 if (rg_locked) {
2080 rgrp_lock_local(rs->rs_rgd);
2081 } else {
2082 if (skip && skip--)
2083 goto next_rgrp;
2084 if (!gfs2_rs_active(rs)) {
2085 if (loops == 0 &&
2086 !fast_to_acquire(rs->rs_rgd))
2087 goto next_rgrp;
2088 if ((loops < 2) &&
2089 gfs2_rgrp_used_recently(rs, 1000) &&
2090 gfs2_rgrp_congested(rs->rs_rgd, loops))
2091 goto next_rgrp;
2092 }
2093 error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
2094 LM_ST_EXCLUSIVE, flags,
2095 &ip->i_rgd_gh);
2096 if (unlikely(error))
2097 return error;
2098 rgrp_lock_local(rs->rs_rgd);
2099 if (!gfs2_rs_active(rs) && (loops < 2) &&
2100 gfs2_rgrp_congested(rs->rs_rgd, loops))
2101 goto skip_rgrp;
2102 if (sdp->sd_args.ar_rgrplvb) {
2103 error = update_rgrp_lvb(rs->rs_rgd,
2104 &ip->i_rgd_gh);
2105 if (unlikely(error)) {
2106 rgrp_unlock_local(rs->rs_rgd);
2107 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2108 return error;
2109 }
2110 }
2111 }
2112
2113 /* Skip unusable resource groups */
2114 if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC |
2115 GFS2_RDF_ERROR)) ||
2116 (loops == 0 && target > rs->rs_rgd->rd_extfail_pt))
2117 goto skip_rgrp;
2118
2119 if (sdp->sd_args.ar_rgrplvb) {
2120 error = gfs2_instantiate(&ip->i_rgd_gh);
2121 if (error)
2122 goto skip_rgrp;
2123 }
2124
2125 /* Get a reservation if we don't already have one */
2126 if (!gfs2_rs_active(rs))
2127 rg_mblk_search(rs->rs_rgd, ip, ap);
2128
2129 /* Skip rgrps when we can't get a reservation on first pass */
2130 if (!gfs2_rs_active(rs) && (loops < 1))
2131 goto check_rgrp;
2132
2133 /* If rgrp has enough free space, use it */
2134 rgd = rs->rs_rgd;
2135 spin_lock(&rgd->rd_rsspin);
2136 free_blocks = rgd_free(rgd, rs);
2137 blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
2138 if (free_blocks < target || blocks_available < target) {
2139 spin_unlock(&rgd->rd_rsspin);
2140 goto check_rgrp;
2141 }
2142 rs->rs_reserved = ap->target;
2143 if (rs->rs_reserved > blocks_available)
2144 rs->rs_reserved = blocks_available;
2145 rgd->rd_reserved += rs->rs_reserved;
2146 spin_unlock(&rgd->rd_rsspin);
2147 rgrp_unlock_local(rs->rs_rgd);
2148 return 0;
2149check_rgrp:
2150 /* Check for unlinked inodes which can be reclaimed */
2151 if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK)
2152 try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
2153 ip->i_no_addr);
2154skip_rgrp:
2155 rgrp_unlock_local(rs->rs_rgd);
2156
2157 /* Drop reservation, if we couldn't use reserved rgrp */
2158 if (gfs2_rs_active(rs))
2159 gfs2_rs_deltree(rs);
2160
2161 /* Unlock rgrp if required */
2162 if (!rg_locked)
2163 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2164next_rgrp:
2165 /* Find the next rgrp, and continue looking */
2166 if (gfs2_select_rgrp(&rs->rs_rgd, begin))
2167 continue;
2168 if (skip)
2169 continue;
2170
2171 /* If we've scanned all the rgrps, but found no free blocks
2172 * then this checks for some less likely conditions before
2173 * trying again.
2174 */
2175 loops++;
2176 /* Check that fs hasn't grown if writing to rindex */
2177 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2178 error = gfs2_ri_update(ip);
2179 if (error)
2180 return error;
2181 }
2182 /* Flushing the log may release space */
2183 if (loops == 2) {
2184 if (ap->min_target)
2185 target = ap->min_target;
2186 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
2187 GFS2_LFC_INPLACE_RESERVE);
2188 }
2189 }
2190
2191 return -ENOSPC;
2192}
2193
2194/**
2195 * gfs2_inplace_release - release an inplace reservation
2196 * @ip: the inode the reservation was taken out on
2197 *
2198 * Release a reservation made by gfs2_inplace_reserve().
2199 */
2200
2201void gfs2_inplace_release(struct gfs2_inode *ip)
2202{
2203 struct gfs2_blkreserv *rs = &ip->i_res;
2204
2205 if (rs->rs_reserved) {
2206 struct gfs2_rgrpd *rgd = rs->rs_rgd;
2207
2208 spin_lock(&rgd->rd_rsspin);
2209 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved < rs->rs_reserved);
2210 rgd->rd_reserved -= rs->rs_reserved;
2211 spin_unlock(&rgd->rd_rsspin);
2212 rs->rs_reserved = 0;
2213 }
2214 if (gfs2_holder_initialized(&ip->i_rgd_gh))
2215 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
2216}
2217
2218/**
2219 * gfs2_alloc_extent - allocate an extent from a given bitmap
2220 * @rbm: the position of the first block to allocate
2221 * @dinode: TRUE if the first block we allocate is for a dinode
2222 * @n: The extent length (value/result)
2223 *
2224 * Add the bitmap buffer to the transaction.
2225 * Set the found bits to the dinode or data state as appropriate.
2226 */
2227static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2228 unsigned int *n)
2229{
2230 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2231 const unsigned int elen = *n;
2232 u64 block;
2233 int ret;
2234
2235 *n = 1;
2236 block = gfs2_rbm_to_block(rbm);
2237 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2238 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2239 block++;
2240 while (*n < elen) {
2241 ret = gfs2_rbm_from_block(&pos, block);
2242 if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
2243 break;
2244 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2245 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2246 (*n)++;
2247 block++;
2248 }
2249}
2250
2251/**
2252 * rgblk_free - Change alloc state of given block(s)
2253 * @sdp: the filesystem
2254 * @rgd: the resource group the blocks are in
2255 * @bstart: the start of a run of blocks to free
2256 * @blen: the length of the block run (all must lie within ONE RG!)
2257 * @new_state: GFS2_BLKST_XXX the new block state to set
2258 */
2259
2260static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
2261 u64 bstart, u32 blen, unsigned char new_state)
2262{
2263 struct gfs2_rbm rbm;
2264 struct gfs2_bitmap *bi, *bi_prev = NULL;
2265
2266 rbm.rgd = rgd;
2267 if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
2268 return;
2269 while (blen--) {
2270 bi = rbm_bi(&rbm);
2271 if (bi != bi_prev) {
2272 if (!bi->bi_clone) {
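 /*
  * Take a copy of the bitmap before changing it: block allocation
  * searches the clone (see gfs2_rbm_find()), so blocks freed here are
  * not handed out again before the change has been committed to the
  * journal.
  */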
2273 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2274 GFP_NOFS | __GFP_NOFAIL);
2275 memcpy(bi->bi_clone + bi->bi_offset,
2276 bi->bi_bh->b_data + bi->bi_offset,
2277 bi->bi_bytes);
2278 }
2279 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2280 bi_prev = bi;
2281 }
2282 gfs2_setbit(&rbm, false, new_state);
2283 gfs2_rbm_add(&rbm, 1);
2284 }
2285}
2286
2287/**
2288 * gfs2_rgrp_dump - print out an rgrp
2289 * @seq: The iterator
2290 * @rgd: The rgrp in question
2291 * @fs_id_buf: pointer to file system id (if requested)
2292 *
2293 */
2294
2295void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
2296 const char *fs_id_buf)
2297{
2298 struct gfs2_blkreserv *trs;
2299 const struct rb_node *n;
2300
2301 spin_lock(&rgd->rd_rsspin);
2302 gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n",
2303 fs_id_buf,
2304 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2305 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2306 rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
2307 if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
2308 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
2309
2310 gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
2311 be32_to_cpu(rgl->rl_flags),
2312 be32_to_cpu(rgl->rl_free),
2313 be32_to_cpu(rgl->rl_dinodes));
2314 }
2315 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2316 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2317 dump_rs(seq, trs, fs_id_buf);
2318 }
2319 spin_unlock(&rgd->rd_rsspin);
2320}
2321
2322static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2323{
2324 struct gfs2_sbd *sdp = rgd->rd_sbd;
2325 char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2326
2327 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2328 (unsigned long long)rgd->rd_addr);
2329 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2330 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2331 gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
2332 rgd->rd_flags |= GFS2_RDF_ERROR;
2333}
2334
2335/**
2336 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2337 * @ip: The inode we have just allocated blocks for
2338 * @rbm: The start of the allocated blocks
2339 * @len: The extent length
2340 *
2341 * Adjusts a reservation after an allocation has taken place. If the
2342 * reservation does not match the allocation, or if it is now empty
2343 * then it is removed.
2344 */
2345
2346static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2347 const struct gfs2_rbm *rbm, unsigned len)
2348{
2349 struct gfs2_blkreserv *rs = &ip->i_res;
2350 struct gfs2_rgrpd *rgd = rbm->rgd;
2351
2352 BUG_ON(rs->rs_reserved < len);
2353 rs->rs_reserved -= len;
2354 if (gfs2_rs_active(rs)) {
2355 u64 start = gfs2_rbm_to_block(rbm);
2356
2357 if (rs->rs_start == start) {
2358 unsigned int rlen;
2359
2360 rs->rs_start += len;
2361 rlen = min(rs->rs_requested, len);
2362 rs->rs_requested -= rlen;
2363 rgd->rd_requested -= rlen;
2364 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2365 if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
2366 rs->rs_requested)
2367 return;
2368 /* We used up our block reservation, so we should
2369 reserve more blocks next time. */
2370 atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
2371 }
2372 __rs_deltree(rs);
2373 }
2374}
2375
2376/**
2377 * gfs2_set_alloc_start - Set starting point for block allocation
2378 * @rbm: The rbm which will be set to the required location
2379 * @ip: The gfs2 inode
2380 * @dinode: Flag to say if allocation includes a new inode
2381 *
2382 * This sets the starting point from the reservation if one is active
2383 * otherwise it falls back to guessing a start point based on the
2384 * inode's goal block or the last allocation point in the rgrp.
2385 */
2386
2387static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2388 const struct gfs2_inode *ip, bool dinode)
2389{
2390 u64 goal;
2391
2392 if (gfs2_rs_active(&ip->i_res)) {
2393 goal = ip->i_res.rs_start;
2394 } else {
2395 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2396 goal = ip->i_goal;
2397 else
2398 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2399 }
2400 if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
2401 rbm->bii = 0;
2402 rbm->offset = 0;
2403 }
2404}
2405
2406/**
2407 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2408 * @ip: the inode to allocate the block for
2409 * @bn: Used to return the starting block number
2410 * @nblocks: requested number of blocks/extent length (value/result)
2411 * @dinode: 1 if we're allocating a dinode block, else 0
2412 *
2413 * Returns: 0 or error
2414 */
2415
2416int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2417 bool dinode)
2418{
2419 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2420 struct buffer_head *dibh;
2421 struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, };
2422 u64 block; /* block, within the file system scope */
2423 u32 minext = 1;
2424 int error = -ENOSPC;
2425
2426 BUG_ON(ip->i_res.rs_reserved < *nblocks);
2427
2428 rgrp_lock_local(rbm.rgd);
2429 if (gfs2_rs_active(&ip->i_res)) {
2430 gfs2_set_alloc_start(&rbm, ip, dinode);
2431 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false);
2432 }
2433 if (error == -ENOSPC) {
2434 gfs2_set_alloc_start(&rbm, ip, dinode);
2435 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false);
2436 }
2437
2438 /* Since all blocks are reserved in advance, this shouldn't happen */
2439 if (error) {
2440 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2441 (unsigned long long)ip->i_no_addr, error, *nblocks,
2442 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2443 rbm.rgd->rd_extfail_pt);
2444 goto rgrp_error;
2445 }
2446
2447 gfs2_alloc_extent(&rbm, dinode, nblocks);
2448 block = gfs2_rbm_to_block(&rbm);
2449 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2450 if (!dinode) {
2451 ip->i_goal = block + *nblocks - 1;
2452 error = gfs2_meta_inode_buffer(ip, &dibh);
2453 if (error == 0) {
2454 struct gfs2_dinode *di =
2455 (struct gfs2_dinode *)dibh->b_data;
2456 gfs2_trans_add_meta(ip->i_gl, dibh);
2457 di->di_goal_meta = di->di_goal_data =
2458 cpu_to_be64(ip->i_goal);
2459 brelse(dibh);
2460 }
2461 }
2462 spin_lock(&rbm.rgd->rd_rsspin);
2463 gfs2_adjust_reservation(ip, &rbm, *nblocks);
2464 if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) {
2465 fs_warn(sdp, "nblocks=%u\n", *nblocks);
2466 spin_unlock(&rbm.rgd->rd_rsspin);
2467 goto rgrp_error;
2468 }
2469 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_reserved < *nblocks);
2470 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free_clone < *nblocks);
2471 GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free < *nblocks);
2472 rbm.rgd->rd_reserved -= *nblocks;
2473 rbm.rgd->rd_free_clone -= *nblocks;
2474 rbm.rgd->rd_free -= *nblocks;
2475 spin_unlock(&rbm.rgd->rd_rsspin);
2476 if (dinode) {
2477 u64 generation;
2478
2479 rbm.rgd->rd_dinodes++;
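 /* Hand out the next dinode generation number, skipping zero. */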
2480 generation = rbm.rgd->rd_igeneration++;
2481 if (generation == 0)
2482 generation = rbm.rgd->rd_igeneration++;
2483 ip->i_generation = generation;
2484 }
2485
2486 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2487 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2488 rgrp_unlock_local(rbm.rgd);
2489
2490 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2491 if (dinode)
2492 gfs2_trans_remove_revoke(sdp, block, *nblocks);
2493
2494 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2495
2496 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2497 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2498 *bn = block;
2499 return 0;
2500
2501rgrp_error:
2502 rgrp_unlock_local(rbm.rgd);
2503 gfs2_rgrp_error(rbm.rgd);
2504 return -EIO;
2505}
2506
2507/**
2508 * __gfs2_free_blocks - free a contiguous run of block(s)
2509 * @ip: the inode these blocks are being freed from
2510 * @rgd: the resource group the blocks are in
2511 * @bstart: first block of a run of contiguous blocks
2512 * @blen: the length of the block run
2513 * @meta: 1 if the blocks represent metadata
2514 *
2515 */
2516
2517void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
2518 u64 bstart, u32 blen, int meta)
2519{
2520 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2521
2522 rgrp_lock_local(rgd);
2523 rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
2524 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2525 rgd->rd_free += blen;
2526 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2527 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2528 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2529 rgrp_unlock_local(rgd);
2530
2531 /* Directories keep their data in the metadata address space */
2532 if (meta || ip->i_depth || gfs2_is_jdata(ip))
2533 gfs2_journal_wipe(ip, bstart, blen);
2534}
2535
2536/**
2537 * gfs2_free_meta - free a contiguous run of metadata block(s)
2538 * @ip: the inode these blocks are being freed from
2539 * @rgd: the resource group the blocks are in
2540 * @bstart: first block of a run of contiguous blocks
2541 * @blen: the length of the block run
2542 *
2543 */
2544
2545void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
2546 u64 bstart, u32 blen)
2547{
2548 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2549
2550 __gfs2_free_blocks(ip, rgd, bstart, blen, 1);
2551 gfs2_statfs_change(sdp, 0, +blen, 0);
2552 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2553}
2554
2555void gfs2_unlink_di(struct inode *inode)
2556{
2557 struct gfs2_inode *ip = GFS2_I(inode);
2558 struct gfs2_sbd *sdp = GFS2_SB(inode);
2559 struct gfs2_rgrpd *rgd;
2560 u64 blkno = ip->i_no_addr;
2561
2562 rgd = gfs2_blk2rgrpd(sdp, blkno, true);
2563 if (!rgd)
2564 return;
2565 rgrp_lock_local(rgd);
2566 rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2567 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2568 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2569 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2570 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
2571 rgrp_unlock_local(rgd);
2572}
2573
2574void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2575{
2576 struct gfs2_sbd *sdp = rgd->rd_sbd;
2577
2578 rgrp_lock_local(rgd);
2579 rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2580 if (!rgd->rd_dinodes)
2581 gfs2_consist_rgrpd(rgd);
2582 rgd->rd_dinodes--;
2583 rgd->rd_free++;
2584
2585 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2586 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2587 be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
2588 rgrp_unlock_local(rgd);
2589
2590 gfs2_statfs_change(sdp, 0, +1, -1);
2591 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2592 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2593 gfs2_journal_wipe(ip, ip->i_no_addr, 1);
2594}
2595
2596/**
2597 * gfs2_check_blk_type - Check the type of a block
2598 * @sdp: The superblock
2599 * @no_addr: The block number to check
2600 * @type: The block type we are looking for
2601 *
2602 * The inode glock of @no_addr must be held. The @type to check for is either
2603 * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE
2604 * or GFS2_BLKST_USED would make no sense.
2605 *
2606 * Returns: 0 if the block type matches the expected type
2607 * -ESTALE if it doesn't match
2608 * or -ve errno if something went wrong while checking
2609 */
2610
2611int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2612{
2613 struct gfs2_rgrpd *rgd;
2614 struct gfs2_holder rgd_gh;
2615 struct gfs2_rbm rbm;
2616 int error = -EINVAL;
2617
2618 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2619 if (!rgd)
2620 goto fail;
2621
2622 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2623 if (error)
2624 goto fail;
2625
2626 rbm.rgd = rgd;
2627 error = gfs2_rbm_from_block(&rbm, no_addr);
2628 if (!WARN_ON_ONCE(error)) {
2629 /*
2630 * No need to take the local resource group lock here; the
2631 * inode glock of @no_addr provides the necessary
2632 * synchronization in case the block is an inode. (In case
2633 * the block is not an inode, the block type will not match
2634 * the @type we are looking for.)
2635 */
2636 if (gfs2_testbit(&rbm, false) != type)
2637 error = -ESTALE;
2638 }
2639
2640 gfs2_glock_dq_uninit(&rgd_gh);
2641
2642fail:
2643 return error;
2644}
2645
2646/**
2647 * gfs2_rlist_add - add a RG to a list of RGs
2648 * @ip: the inode
2649 * @rlist: the list of resource groups
2650 * @block: the block
2651 *
2652 * Figure out what RG a block belongs to and add that RG to the list
2653 *
2654 * FIXME: Don't use NOFAIL
2655 *
2656 */
2657
2658void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2659 u64 block)
2660{
2661 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2662 struct gfs2_rgrpd *rgd;
2663 struct gfs2_rgrpd **tmp;
2664 unsigned int new_space;
2665 unsigned int x;
2666
2667 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2668 return;
2669
2670 /*
2671 * The resource group last accessed is kept in the last position.
2672 */
2673
2674 if (rlist->rl_rgrps) {
2675 rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
2676 if (rgrp_contains_block(rgd, block))
2677 return;
2678 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2679 } else {
2680 rgd = ip->i_res.rs_rgd;
2681 if (!rgd || !rgrp_contains_block(rgd, block))
2682 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2683 }
2684
2685 if (!rgd) {
2686 fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
2687 (unsigned long long)block);
2688 return;
2689 }
2690
2691 for (x = 0; x < rlist->rl_rgrps; x++) {
2692 if (rlist->rl_rgd[x] == rgd) {
2693 swap(rlist->rl_rgd[x],
2694 rlist->rl_rgd[rlist->rl_rgrps - 1]);
2695 return;
2696 }
2697 }
2698
2699 if (rlist->rl_rgrps == rlist->rl_space) {
2700 new_space = rlist->rl_space + 10;
2701
2702 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2703 GFP_NOFS | __GFP_NOFAIL);
2704
2705 if (rlist->rl_rgd) {
2706 memcpy(tmp, rlist->rl_rgd,
2707 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2708 kfree(rlist->rl_rgd);
2709 }
2710
2711 rlist->rl_space = new_space;
2712 rlist->rl_rgd = tmp;
2713 }
2714
2715 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2716}
2717
2718/**
2719 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2720 * and initialize an array of glock holders for them
2721 * @rlist: the list of resource groups
2722 * @state: the state we're requesting
2723 * @flags: the modifier flags
2724 *
2725 * FIXME: Don't use NOFAIL
2726 *
2727 */
2728
2729void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
2730 unsigned int state, u16 flags)
2731{
2732 unsigned int x;
2733
2734 rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
2735 sizeof(struct gfs2_holder),
2736 GFP_NOFS | __GFP_NOFAIL);
2737 for (x = 0; x < rlist->rl_rgrps; x++)
2738 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, flags,
2739 &rlist->rl_ghs[x]);
2740}
2741
2742/**
2743 * gfs2_rlist_free - free a resource group list
2744 * @rlist: the list of resource groups
2745 *
2746 */
2747
2748void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2749{
2750 unsigned int x;
2751
2752 kfree(rlist->rl_rgd);
2753
2754 if (rlist->rl_ghs) {
2755 for (x = 0; x < rlist->rl_rgrps; x++)
2756 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2757 kfree(rlist->rl_ghs);
2758 rlist->rl_ghs = NULL;
2759 }
2760}
2761
2762void rgrp_lock_local(struct gfs2_rgrpd *rgd)
2763{
2764 mutex_lock(&rgd->rd_mutex);
2765}
2766
2767void rgrp_unlock_local(struct gfs2_rgrpd *rgd)
2768{
2769 mutex_unlock(&rgd->rd_mutex);
2770}