1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/completion.h>
12#include <linux/buffer_head.h>
13#include <linux/blkdev.h>
14#include <linux/gfs2_ondisk.h>
15#include <linux/crc32.h>
16
17#include "gfs2.h"
18#include "incore.h"
19#include "bmap.h"
20#include "glock.h"
21#include "inode.h"
22#include "meta_io.h"
23#include "quota.h"
24#include "rgrp.h"
25#include "log.h"
26#include "super.h"
27#include "trans.h"
28#include "dir.h"
29#include "util.h"
30#include "trace_gfs2.h"
31
32/* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to
34 * keep it small.
35 */
36struct metapath {
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39};
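/*
 * A quick sketch of how struct metapath is used (based on find_metapath()
 * and lookup_metapath() below): mp_list[h] is the index of the pointer to
 * follow at height h of the metadata tree, and mp_bh[h] is the buffer
 * holding the block for that height, with mp_bh[0] being the dinode buffer.
 */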
40
41struct strip_mine {
42 int sm_first;
43 unsigned int sm_height;
44};
45
46/**
47 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
48 * @ip: the inode
49 * @dibh: the dinode buffer
50 * @block: the block number that was allocated
51 * @page: The (optional) page. This is looked up if @page is NULL
52 *
53 * Returns: errno
54 */
55
56static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
57 u64 block, struct page *page)
58{
59 struct inode *inode = &ip->i_inode;
60 struct buffer_head *bh;
61 int release = 0;
62
63 if (!page || page->index) {
64 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
65 if (!page)
66 return -ENOMEM;
67 release = 1;
68 }
69
70 if (!PageUptodate(page)) {
71 void *kaddr = kmap(page);
72 u64 dsize = i_size_read(inode);
73
74 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
75 dsize = dibh->b_size - sizeof(struct gfs2_dinode);
76
77 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
78 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
79 kunmap(page);
80
81 SetPageUptodate(page);
82 }
83
84 if (!page_has_buffers(page))
85 create_empty_buffers(page, 1 << inode->i_blkbits,
86 (1 << BH_Uptodate));
87
88 bh = page_buffers(page);
89
90 if (!buffer_mapped(bh))
91 map_bh(bh, inode->i_sb, block);
92
93 set_buffer_uptodate(bh);
94 if (!gfs2_is_jdata(ip))
95 mark_buffer_dirty(bh);
96 if (!gfs2_is_writeback(ip))
97 gfs2_trans_add_data(ip->i_gl, bh);
98
99 if (release) {
100 unlock_page(page);
101 put_page(page);
102 }
103
104 return 0;
105}
106
107/**
108 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
109 * @ip: The GFS2 inode to unstuff
110 * @page: The (optional) page. This is looked up if the @page is NULL
111 *
112 * This routine unstuffs a dinode and returns it to a "normal" state such
113 * that the height can be grown in the traditional way.
114 *
115 * Returns: errno
116 */
117
118int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
119{
120 struct buffer_head *bh, *dibh;
121 struct gfs2_dinode *di;
122 u64 block = 0;
123 int isdir = gfs2_is_dir(ip);
124 int error;
125
126 down_write(&ip->i_rw_mutex);
127
128 error = gfs2_meta_inode_buffer(ip, &dibh);
129 if (error)
130 goto out;
131
132 if (i_size_read(&ip->i_inode)) {
133 /* Get a free block, fill it with the stuffed data,
134 and write it out to disk */
135
136 unsigned int n = 1;
137 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
138 if (error)
139 goto out_brelse;
140 if (isdir) {
141 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
142 error = gfs2_dir_get_new_buffer(ip, block, &bh);
143 if (error)
144 goto out_brelse;
145 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
146 dibh, sizeof(struct gfs2_dinode));
147 brelse(bh);
148 } else {
149 error = gfs2_unstuffer_page(ip, dibh, block, page);
150 if (error)
151 goto out_brelse;
152 }
153 }
154
155 /* Set up the pointer to the new block */
156
157 gfs2_trans_add_meta(ip->i_gl, dibh);
158 di = (struct gfs2_dinode *)dibh->b_data;
159 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
160
161 if (i_size_read(&ip->i_inode)) {
162 *(__be64 *)(di + 1) = cpu_to_be64(block);
163 gfs2_add_inode_blocks(&ip->i_inode, 1);
164 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
165 }
166
167 ip->i_height = 1;
168 di->di_height = cpu_to_be16(1);
169
170out_brelse:
171 brelse(dibh);
172out:
173 up_write(&ip->i_rw_mutex);
174 return error;
175}
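/*
 * Note: a dinode can hold at most sb_bsize - sizeof(struct gfs2_dinode)
 * bytes of stuffed data (see the dsize clamp in gfs2_unstuffer_page()
 * above); do_grow() below unstuffs the inode once a write would push the
 * size past that limit.
 */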
176
177
178/**
179 * find_metapath - Find path through the metadata tree
180 * @sdp: The superblock
181 * @block: The disk block to look up
182 * @mp: The metapath to return the result in
183 * @height: The pre-calculated height of the metadata tree
184 *
185 * This routine returns a struct metapath structure that defines a path
186 * through the metadata of inode "ip" to get to block "block".
187 *
188 * Example:
189 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
190 * filesystem with a blocksize of 4096.
191 *
192 * find_metapath() would return a struct metapath structure set to:
193 * mp_list[0] = 0, mp_list[1] = 48,
194 * and mp_list[2] = 165.
195 *
196 * That means that in order to get to the block containing the byte at
197 * offset 101342453, we would load the indirect block pointed to by pointer
198 * 0 in the dinode. We would then load the indirect block pointed to by
199 * pointer 48 in that indirect block. We would then load the data block
200 * pointed to by pointer 165 in that indirect block.
201 *
202 * ----------------------------------------
203 * | Dinode | |
204 * | | 4|
205 * | |0 1 2 3 4 5 9|
206 * | | 6|
207 * ----------------------------------------
208 * |
209 * |
210 * V
211 * ----------------------------------------
212 * | Indirect Block |
213 * | 5|
214 * | 4 4 4 4 4 5 5 1|
215 * |0 5 6 7 8 9 0 1 2|
216 * ----------------------------------------
217 * |
218 * |
219 * V
220 * ----------------------------------------
221 * | Indirect Block |
222 * | 1 1 1 1 1 5|
223 * | 6 6 6 6 6 1|
224 * |0 3 4 5 6 7 2|
225 * ----------------------------------------
226 * |
227 * |
228 * V
229 * ----------------------------------------
230 * | Data block containing offset |
231 * | 101342453 |
232 * | |
233 * | |
234 * ----------------------------------------
235 *
236 */
237
238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 struct metapath *mp, unsigned int height)
240{
241 unsigned int i;
242
243 for (i = height; i--;)
244 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
245
246}
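/*
 * Worked example for the kerneldoc above (assuming 4096-byte blocks and
 * sd_inptrs == 512): byte offset 101342453 maps to logical block
 * 101342453 >> 12 = 24741, and with height == 3 the loop yields
 *   mp_list[2] = 24741 % 512 = 165,  24741 / 512 = 48
 *   mp_list[1] =    48 % 512 =  48,     48 / 512 = 0
 *   mp_list[0] =     0 % 512 =   0
 * i.e. the {0, 48, 165} path shown in the diagram.
 */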
247
248static inline unsigned int metapath_branch_start(const struct metapath *mp)
249{
250 if (mp->mp_list[0] == 0)
251 return 2;
252 return 1;
253}
254
255/**
256 * metapointer - Return pointer to start of metadata in a buffer
257 * @height: The metadata height (0 = dinode)
258 * @mp: The metapath
259 *
260 * Return a pointer to the block number of the next height of the metadata
261 * tree given a buffer containing the pointer to the current height of the
262 * metadata tree.
263 */
264
265static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
266{
267 struct buffer_head *bh = mp->mp_bh[height];
268 unsigned int head_size = (height > 0) ?
269 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
270 return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
271}
272
273static void gfs2_metapath_ra(struct gfs2_glock *gl,
274 const struct buffer_head *bh, const __be64 *pos)
275{
276 struct buffer_head *rabh;
277 const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
278 const __be64 *t;
279
280 for (t = pos; t < endp; t++) {
281 if (!*t)
282 continue;
283
284 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
285 if (trylock_buffer(rabh)) {
286 if (!buffer_uptodate(rabh)) {
287 rabh->b_end_io = end_buffer_read_sync;
288 submit_bh(READA | REQ_META, rabh);
289 continue;
290 }
291 unlock_buffer(rabh);
292 }
293 brelse(rabh);
294 }
295}
296
297/**
298 * lookup_metapath - Walk the metadata tree to a specific point
299 * @ip: The inode
300 * @mp: The metapath
301 *
302 * Assumes that the inode's buffer has already been looked up and
303 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
304 * by find_metapath().
305 *
306 * If this function encounters part of the tree which has not been
307 * allocated, it returns the current height of the tree at the point
308 * at which it found the unallocated block. Blocks which are found are
309 * added to the mp->mp_bh[] list.
310 *
311 * Returns: error or height of metadata tree
312 */
313
314static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
315{
316 unsigned int end_of_metadata = ip->i_height - 1;
317 unsigned int x;
318 __be64 *ptr;
319 u64 dblock;
320 int ret;
321
322 for (x = 0; x < end_of_metadata; x++) {
323 ptr = metapointer(x, mp);
324 dblock = be64_to_cpu(*ptr);
325 if (!dblock)
326 return x + 1;
327
328 ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
329 if (ret)
330 return ret;
331 }
332
333 return ip->i_height;
334}
335
336static inline void release_metapath(struct metapath *mp)
337{
338 int i;
339
340 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
341 if (mp->mp_bh[i] == NULL)
342 break;
343 brelse(mp->mp_bh[i]);
344 }
345}
346
347/**
348 * gfs2_extent_length - Returns length of an extent of blocks
349 * @start: Start of the buffer
350 * @len: Length of the buffer in bytes
351 * @ptr: Current position in the buffer
352 * @limit: Max extent length to return (0 = unlimited)
353 * @eob: Set to 1 if we hit "end of block"
354 *
355 * If the first block is zero (unallocated) it will return the number of
356 * unallocated blocks in the extent, otherwise it will return the number
357 * of contiguous blocks in the extent.
358 *
359 * Returns: The length of the extent (minimum of one block)
360 */
361
362static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
363{
364 const __be64 *end = (start + len);
365 const __be64 *first = ptr;
366 u64 d = be64_to_cpu(*ptr);
367
368 *eob = 0;
369 do {
370 ptr++;
371 if (ptr >= end)
372 break;
373 if (limit && --limit == 0)
374 break;
375 if (d)
376 d++;
377 } while(be64_to_cpu(*ptr) == d);
378 if (ptr >= end)
379 *eob = 1;
380 return (ptr - first);
381}
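/*
 * For example, with *ptr pointing at a run of pointers {100, 101, 102, 0}
 * this returns 3 (three contiguous allocated blocks), while {0, 0, 5, ...}
 * returns 2 (two unallocated blocks); a non-zero @limit caps the count.
 */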
382
383static inline void bmap_lock(struct gfs2_inode *ip, int create)
384{
385 if (create)
386 down_write(&ip->i_rw_mutex);
387 else
388 down_read(&ip->i_rw_mutex);
389}
390
391static inline void bmap_unlock(struct gfs2_inode *ip, int create)
392{
393 if (create)
394 up_write(&ip->i_rw_mutex);
395 else
396 up_read(&ip->i_rw_mutex);
397}
398
399static inline __be64 *gfs2_indirect_init(struct metapath *mp,
400 struct gfs2_glock *gl, unsigned int i,
401 unsigned offset, u64 bn)
402{
403 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
404 ((i > 1) ? sizeof(struct gfs2_meta_header) :
405 sizeof(struct gfs2_dinode)));
406 BUG_ON(i < 1);
407 BUG_ON(mp->mp_bh[i] != NULL);
408 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
409 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
410 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
411 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
412 ptr += offset;
413 *ptr = cpu_to_be64(bn);
414 return ptr;
415}
416
417enum alloc_state {
418 ALLOC_DATA = 0,
419 ALLOC_GROW_DEPTH = 1,
420 ALLOC_GROW_HEIGHT = 2,
421 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
422};
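/*
 * gfs2_bmap_alloc() below walks these states in decreasing order: it may
 * start at ALLOC_GROW_HEIGHT (add new top-level indirect blocks), fall
 * through to ALLOC_GROW_DEPTH (fill in the lower indirect blocks), and
 * finish at ALLOC_DATA (write the data block pointers).  The missing
 * breaks in its switch statement are deliberate fallthroughs.
 */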
423
424/**
425 * gfs2_bmap_alloc - Build a metadata tree of the requested height
426 * @inode: The GFS2 inode
427 * @lblock: The logical starting block of the extent
428 * @bh_map: This is used to return the mapping details
429 * @mp: The metapath
430 * @sheight: The starting height (i.e. what's already mapped)
431 * @height: The height to build to
432 * @maxlen: The max number of data blocks to alloc
433 *
434 * In this routine we may have to alloc:
435 * i) Indirect blocks to grow the metadata tree height
436 * ii) Indirect blocks to fill in lower part of the metadata tree
437 * iii) Data blocks
438 *
439 * The function is in two parts. The first part works out the total
440 * number of blocks which we need. The second part does the actual
441 * allocation asking for an extent at a time (if enough contiguous free
442 * blocks are available, there will only be one request per bmap call)
443 * and uses the state machine to initialise the blocks in order.
444 *
445 * Returns: errno on error
446 */
447
448static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
449 struct buffer_head *bh_map, struct metapath *mp,
450 const unsigned int sheight,
451 const unsigned int height,
452 const size_t maxlen)
453{
454 struct gfs2_inode *ip = GFS2_I(inode);
455 struct gfs2_sbd *sdp = GFS2_SB(inode);
456 struct super_block *sb = sdp->sd_vfs;
457 struct buffer_head *dibh = mp->mp_bh[0];
458 u64 bn, dblock = 0;
459 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
460 unsigned dblks = 0;
461 unsigned ptrs_per_blk;
462 const unsigned end_of_metadata = height - 1;
463 int ret;
464 int eob = 0;
465 enum alloc_state state;
466 __be64 *ptr;
467 __be64 zero_bn = 0;
468
469 BUG_ON(sheight < 1);
470 BUG_ON(dibh == NULL);
471
472 gfs2_trans_add_meta(ip->i_gl, dibh);
473
474 if (height == sheight) {
475 struct buffer_head *bh;
476		/* Bottom indirect block exists, find unallocated extent size */
477 ptr = metapointer(end_of_metadata, mp);
478 bh = mp->mp_bh[end_of_metadata];
479 dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
480 &eob);
481 BUG_ON(dblks < 1);
482 state = ALLOC_DATA;
483 } else {
484 /* Need to allocate indirect blocks */
485 ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
486 dblks = min(maxlen, (size_t)(ptrs_per_blk -
487 mp->mp_list[end_of_metadata]));
488 if (height == ip->i_height) {
489 /* Writing into existing tree, extend tree down */
490 iblks = height - sheight;
491 state = ALLOC_GROW_DEPTH;
492 } else {
493 /* Building up tree height */
494 state = ALLOC_GROW_HEIGHT;
495 iblks = height - ip->i_height;
496 branch_start = metapath_branch_start(mp);
497 iblks += (height - branch_start);
498 }
499 }
500
501 /* start of the second part of the function (state machine) */
502
503 blks = dblks + iblks;
504 i = sheight;
505 do {
506 int error;
507 n = blks - alloced;
508 error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
509 if (error)
510 return error;
511 alloced += n;
512 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
513 gfs2_trans_add_unrevoke(sdp, bn, n);
514 switch (state) {
515 /* Growing height of tree */
516 case ALLOC_GROW_HEIGHT:
517 if (i == 1) {
518 ptr = (__be64 *)(dibh->b_data +
519 sizeof(struct gfs2_dinode));
520 zero_bn = *ptr;
521 }
522 for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
523 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
524 if (i - 1 == height - ip->i_height) {
525 i--;
526 gfs2_buffer_copy_tail(mp->mp_bh[i],
527 sizeof(struct gfs2_meta_header),
528 dibh, sizeof(struct gfs2_dinode));
529 gfs2_buffer_clear_tail(dibh,
530 sizeof(struct gfs2_dinode) +
531 sizeof(__be64));
532 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
533 sizeof(struct gfs2_meta_header));
534 *ptr = zero_bn;
535 state = ALLOC_GROW_DEPTH;
536 for(i = branch_start; i < height; i++) {
537 if (mp->mp_bh[i] == NULL)
538 break;
539 brelse(mp->mp_bh[i]);
540 mp->mp_bh[i] = NULL;
541 }
542 i = branch_start;
543 }
544 if (n == 0)
545 break;
546 /* Branching from existing tree */
547 case ALLOC_GROW_DEPTH:
548 if (i > 1 && i < height)
549 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
550 for (; i < height && n > 0; i++, n--)
551 gfs2_indirect_init(mp, ip->i_gl, i,
552 mp->mp_list[i-1], bn++);
553 if (i == height)
554 state = ALLOC_DATA;
555 if (n == 0)
556 break;
557 /* Tree complete, adding data blocks */
558 case ALLOC_DATA:
559 BUG_ON(n > dblks);
560 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
561 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
562 dblks = n;
563 ptr = metapointer(end_of_metadata, mp);
564 dblock = bn;
565 while (n-- > 0)
566 *ptr++ = cpu_to_be64(bn++);
567 if (buffer_zeronew(bh_map)) {
568 ret = sb_issue_zeroout(sb, dblock, dblks,
569 GFP_NOFS);
570 if (ret) {
571 fs_err(sdp,
572 "Failed to zero data buffers\n");
573 clear_buffer_zeronew(bh_map);
574 }
575 }
576 break;
577 }
578 } while ((state != ALLOC_DATA) || !dblock);
579
580 ip->i_height = height;
581 gfs2_add_inode_blocks(&ip->i_inode, alloced);
582 gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
583 map_bh(bh_map, inode->i_sb, dblock);
584 bh_map->b_size = dblks << inode->i_blkbits;
585 set_buffer_new(bh_map);
586 return 0;
587}
588
589/**
590 * gfs2_block_map - Map a block from an inode to a disk block
591 * @inode: The inode
592 * @lblock: The logical block number
593 * @bh_map: The bh to be mapped
594 * @create: True if it's ok to alloc blocks to satisfy the request
595 *
596 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
597 * read of metadata will be required before the next block can be
598 * mapped. Sets buffer_new() if new blocks were allocated.
599 *
600 * Returns: errno
601 */
602
603int gfs2_block_map(struct inode *inode, sector_t lblock,
604 struct buffer_head *bh_map, int create)
605{
606 struct gfs2_inode *ip = GFS2_I(inode);
607 struct gfs2_sbd *sdp = GFS2_SB(inode);
608 unsigned int bsize = sdp->sd_sb.sb_bsize;
609 const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
610 const u64 *arr = sdp->sd_heightsize;
611 __be64 *ptr;
612 u64 size;
613 struct metapath mp;
614 int ret;
615 int eob;
616 unsigned int len;
617 struct buffer_head *bh;
618 u8 height;
619
620 BUG_ON(maxlen == 0);
621
622 memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
623 bmap_lock(ip, create);
624 clear_buffer_mapped(bh_map);
625 clear_buffer_new(bh_map);
626 clear_buffer_boundary(bh_map);
627 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
628 if (gfs2_is_dir(ip)) {
629 bsize = sdp->sd_jbsize;
630 arr = sdp->sd_jheightsize;
631 }
632
633 ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
634 if (ret)
635 goto out;
636
637 height = ip->i_height;
638 size = (lblock + 1) * bsize;
639 while (size > arr[height])
640 height++;
641 find_metapath(sdp, lblock, &mp, height);
642 ret = 1;
643 if (height > ip->i_height || gfs2_is_stuffed(ip))
644 goto do_alloc;
645 ret = lookup_metapath(ip, &mp);
646 if (ret < 0)
647 goto out;
648 if (ret != ip->i_height)
649 goto do_alloc;
650 ptr = metapointer(ip->i_height - 1, &mp);
651 if (*ptr == 0)
652 goto do_alloc;
653 map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
654 bh = mp.mp_bh[ip->i_height - 1];
655 len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
656 bh_map->b_size = (len << inode->i_blkbits);
657 if (eob)
658 set_buffer_boundary(bh_map);
659 ret = 0;
660out:
661 release_metapath(&mp);
662 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
663 bmap_unlock(ip, create);
664 return ret;
665
666do_alloc:
667 /* All allocations are done here, firstly check create flag */
668 if (!create) {
669 BUG_ON(gfs2_is_stuffed(ip));
670 ret = 0;
671 goto out;
672 }
673
674 /* At this point ret is the tree depth of already allocated blocks */
675 ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
676 goto out;
677}
678
679/*
680 * Deprecated: do not use in new code
681 */
682int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
683{
684 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
685 int ret;
686 int create = *new;
687
688 BUG_ON(!extlen);
689 BUG_ON(!dblock);
690 BUG_ON(!new);
691
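	/*
	 * Request a mapping of up to 32 blocks (1 << 5) when only looking
	 * up, but just a single block when allocating; gfs2_block_map()
	 * returns the mapped extent length back through bh.b_size.
	 */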
692 bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
693 ret = gfs2_block_map(inode, lblock, &bh, create);
694 *extlen = bh.b_size >> inode->i_blkbits;
695 *dblock = bh.b_blocknr;
696 if (buffer_new(&bh))
697 *new = 1;
698 else
699 *new = 0;
700 return ret;
701}
702
703/**
704 * do_strip - Look for a particular layer of the file and strip it off
705 * @ip: the inode
706 * @dibh: the dinode buffer
707 * @bh: A buffer of pointers
708 * @top: The first pointer in the buffer
709 * @bottom: One more than the last pointer
710 * @height: the height this buffer is at
711 * @sm: a pointer to a struct strip_mine
712 *
713 * Returns: errno
714 */
715
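/*
 * Note on structure: do_strip() makes two passes over the pointer range.
 * The first pass only collects the resource groups involved (via
 * gfs2_rlist_add) so that their glocks can be acquired and a transaction
 * sized; the second pass, under the transaction, actually frees the
 * blocks and zeroes the pointers.
 */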
716static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
717 struct buffer_head *bh, __be64 *top, __be64 *bottom,
718 unsigned int height, struct strip_mine *sm)
719{
720 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
721 struct gfs2_rgrp_list rlist;
722 u64 bn, bstart;
723 u32 blen, btotal;
724 __be64 *p;
725 unsigned int rg_blocks = 0;
726 int metadata;
727 unsigned int revokes = 0;
728 int x;
729 int error;
730
731 error = gfs2_rindex_update(sdp);
732 if (error)
733 return error;
734
735 if (!*top)
736 sm->sm_first = 0;
737
738 if (height != sm->sm_height)
739 return 0;
740
741 if (sm->sm_first) {
742 top++;
743 sm->sm_first = 0;
744 }
745
746 metadata = (height != ip->i_height - 1);
747 if (metadata)
748 revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
749 else if (ip->i_depth)
750 revokes = sdp->sd_inptrs;
751
752 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
753 bstart = 0;
754 blen = 0;
755
756 for (p = top; p < bottom; p++) {
757 if (!*p)
758 continue;
759
760 bn = be64_to_cpu(*p);
761
762 if (bstart + blen == bn)
763 blen++;
764 else {
765 if (bstart)
766 gfs2_rlist_add(ip, &rlist, bstart);
767
768 bstart = bn;
769 blen = 1;
770 }
771 }
772
773 if (bstart)
774 gfs2_rlist_add(ip, &rlist, bstart);
775 else
776 goto out; /* Nothing to do */
777
778 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
779
780 for (x = 0; x < rlist.rl_rgrps; x++) {
781 struct gfs2_rgrpd *rgd;
782 rgd = rlist.rl_ghs[x].gh_gl->gl_object;
783 rg_blocks += rgd->rd_length;
784 }
785
786 error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
787 if (error)
788 goto out_rlist;
789
790 if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */
791 gfs2_rs_deltree(&ip->i_res);
792
793 error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
794 RES_INDIRECT + RES_STATFS + RES_QUOTA,
795 revokes);
796 if (error)
797 goto out_rg_gunlock;
798
799 down_write(&ip->i_rw_mutex);
800
801 gfs2_trans_add_meta(ip->i_gl, dibh);
802 gfs2_trans_add_meta(ip->i_gl, bh);
803
804 bstart = 0;
805 blen = 0;
806 btotal = 0;
807
808 for (p = top; p < bottom; p++) {
809 if (!*p)
810 continue;
811
812 bn = be64_to_cpu(*p);
813
814 if (bstart + blen == bn)
815 blen++;
816 else {
817 if (bstart) {
818 __gfs2_free_blocks(ip, bstart, blen, metadata);
819 btotal += blen;
820 }
821
822 bstart = bn;
823 blen = 1;
824 }
825
826 *p = 0;
827 gfs2_add_inode_blocks(&ip->i_inode, -1);
828 }
829 if (bstart) {
830 __gfs2_free_blocks(ip, bstart, blen, metadata);
831 btotal += blen;
832 }
833
834 gfs2_statfs_change(sdp, 0, +btotal, 0);
835 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
836 ip->i_inode.i_gid);
837
838 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
839
840 gfs2_dinode_out(ip, dibh->b_data);
841
842 up_write(&ip->i_rw_mutex);
843
844 gfs2_trans_end(sdp);
845
846out_rg_gunlock:
847 gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
848out_rlist:
849 gfs2_rlist_free(&rlist);
850out:
851 return error;
852}
853
854/**
855 * recursive_scan - recursively scan through the end of a file
856 * @ip: the inode
857 * @dibh: the dinode buffer
858 * @mp: the path through the metadata to the point to start
859 * @height: the height the recursion is at
860 * @block: the indirect block to look at
861 * @first: 1 if this is the first block
862 * @sm: a pointer to a struct strip_mine, passed through to do_strip()
863 *
864 * When this is first called @height and @block should be zero and
865 * @first should be 1.
866 *
867 * Returns: errno
868 */
869
870static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
871 struct metapath *mp, unsigned int height,
872 u64 block, int first, struct strip_mine *sm)
873{
874 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
875 struct buffer_head *bh = NULL;
876 __be64 *top, *bottom;
877 u64 bn;
878 int error;
879 int mh_size = sizeof(struct gfs2_meta_header);
880
881 if (!height) {
882 error = gfs2_meta_inode_buffer(ip, &bh);
883 if (error)
884 return error;
885 dibh = bh;
886
887 top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
888 bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
889 } else {
890 error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
891 if (error)
892 return error;
893
894 top = (__be64 *)(bh->b_data + mh_size) +
895 (first ? mp->mp_list[height] : 0);
896
897 bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
898 }
899
900 error = do_strip(ip, dibh, bh, top, bottom, height, sm);
901 if (error)
902 goto out;
903
904 if (height < ip->i_height - 1) {
905
906 gfs2_metapath_ra(ip->i_gl, bh, top);
907
908 for (; top < bottom; top++, first = 0) {
909 if (!*top)
910 continue;
911
912 bn = be64_to_cpu(*top);
913
914 error = recursive_scan(ip, dibh, mp, height + 1, bn,
915 first, sm);
916 if (error)
917 break;
918 }
919 }
920out:
921 brelse(bh);
922 return error;
923}
924
925
926/**
927 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
928 *
929 * This is partly borrowed from ext3.
930 */
931static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
932{
933 struct inode *inode = mapping->host;
934 struct gfs2_inode *ip = GFS2_I(inode);
935 unsigned long index = from >> PAGE_SHIFT;
936 unsigned offset = from & (PAGE_SIZE-1);
937 unsigned blocksize, iblock, length, pos;
938 struct buffer_head *bh;
939 struct page *page;
940 int err;
941
942 page = find_or_create_page(mapping, index, GFP_NOFS);
943 if (!page)
944 return 0;
945
946 blocksize = inode->i_sb->s_blocksize;
947 length = blocksize - (offset & (blocksize - 1));
948 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
949
950 if (!page_has_buffers(page))
951 create_empty_buffers(page, blocksize, 0);
952
953 /* Find the buffer that contains "offset" */
954 bh = page_buffers(page);
955 pos = blocksize;
956 while (offset >= pos) {
957 bh = bh->b_this_page;
958 iblock++;
959 pos += blocksize;
960 }
961
962 err = 0;
963
964 if (!buffer_mapped(bh)) {
965 gfs2_block_map(inode, iblock, bh, 0);
966 /* unmapped? It's a hole - nothing to do */
967 if (!buffer_mapped(bh))
968 goto unlock;
969 }
970
971 /* Ok, it's mapped. Make sure it's up-to-date */
972 if (PageUptodate(page))
973 set_buffer_uptodate(bh);
974
975 if (!buffer_uptodate(bh)) {
976 err = -EIO;
977 ll_rw_block(READ, 1, &bh);
978 wait_on_buffer(bh);
979 /* Uhhuh. Read error. Complain and punt. */
980 if (!buffer_uptodate(bh))
981 goto unlock;
982 err = 0;
983 }
984
985 if (!gfs2_is_writeback(ip))
986 gfs2_trans_add_data(ip->i_gl, bh);
987
988 zero_user(page, offset, length);
989 mark_buffer_dirty(bh);
990unlock:
991 unlock_page(page);
992 put_page(page);
993 return err;
994}
995
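/*
 * Cap on the number of revokes per journaled-truncate transaction; with
 * 4KiB blocks this limits each chunk in gfs2_journaled_truncate() to
 * GFS2_JTRUNC_REVOKES * 4KiB = 32MiB of page cache at a time.
 */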
996#define GFS2_JTRUNC_REVOKES 8192
997
998/**
999 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1000 * @inode: The inode being truncated
1001 * @oldsize: The original (larger) size
1002 * @newsize: The new smaller size
1003 *
1004 * With jdata files, we have to journal a revoke for each block which is
1005 * truncated. As a result, we need to split this into separate transactions
1006 * if the number of pages being truncated gets too large.
1007 */
1008
1009static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1010{
1011 struct gfs2_sbd *sdp = GFS2_SB(inode);
1012 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1013 u64 chunk;
1014 int error;
1015
1016 while (oldsize != newsize) {
1017 chunk = oldsize - newsize;
1018 if (chunk > max_chunk)
1019 chunk = max_chunk;
1020 truncate_pagecache(inode, oldsize - chunk);
1021 oldsize -= chunk;
1022 gfs2_trans_end(sdp);
1023 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1024 if (error)
1025 return error;
1026 }
1027
1028 return 0;
1029}
1030
1031static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
1032{
1033 struct gfs2_inode *ip = GFS2_I(inode);
1034 struct gfs2_sbd *sdp = GFS2_SB(inode);
1035 struct address_space *mapping = inode->i_mapping;
1036 struct buffer_head *dibh;
1037 int journaled = gfs2_is_jdata(ip);
1038 int error;
1039
1040 if (journaled)
1041 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1042 else
1043 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1044 if (error)
1045 return error;
1046
1047 error = gfs2_meta_inode_buffer(ip, &dibh);
1048 if (error)
1049 goto out;
1050
1051 gfs2_trans_add_meta(ip->i_gl, dibh);
1052
1053 if (gfs2_is_stuffed(ip)) {
1054 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1055 } else {
1056 if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
1057 error = gfs2_block_truncate_page(mapping, newsize);
1058 if (error)
1059 goto out_brelse;
1060 }
1061 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1062 }
1063
1064 i_size_write(inode, newsize);
1065 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1066 gfs2_dinode_out(ip, dibh->b_data);
1067
1068 if (journaled)
1069 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1070 else
1071 truncate_pagecache(inode, newsize);
1072
1073 if (error) {
1074 brelse(dibh);
1075 return error;
1076 }
1077
1078out_brelse:
1079 brelse(dibh);
1080out:
1081 gfs2_trans_end(sdp);
1082 return error;
1083}
1084
1085static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
1086{
1087 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1088 unsigned int height = ip->i_height;
1089 u64 lblock;
1090 struct metapath mp;
1091 int error;
1092
1093 if (!size)
1094 lblock = 0;
1095 else
1096 lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
1097
1098 find_metapath(sdp, lblock, &mp, ip->i_height);
1099 error = gfs2_rindex_update(sdp);
1100 if (error)
1101 return error;
1102
1103 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1104 if (error)
1105 return error;
1106
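	/*
	 * Strip the deepest layer first (the data pointers at height
	 * i_height - 1), then work back up through the indirect blocks
	 * towards the dinode; recursive_scan()/do_strip() only act on the
	 * layer selected by sm_height on each pass.
	 */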
1107 while (height--) {
1108 struct strip_mine sm;
1109 sm.sm_first = !!size;
1110 sm.sm_height = height;
1111
1112 error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
1113 if (error)
1114 break;
1115 }
1116
1117 gfs2_quota_unhold(ip);
1118
1119 return error;
1120}
1121
1122static int trunc_end(struct gfs2_inode *ip)
1123{
1124 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1125 struct buffer_head *dibh;
1126 int error;
1127
1128 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1129 if (error)
1130 return error;
1131
1132 down_write(&ip->i_rw_mutex);
1133
1134 error = gfs2_meta_inode_buffer(ip, &dibh);
1135 if (error)
1136 goto out;
1137
1138 if (!i_size_read(&ip->i_inode)) {
1139 ip->i_height = 0;
1140 ip->i_goal = ip->i_no_addr;
1141 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1142 gfs2_ordered_del_inode(ip);
1143 }
1144 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1145 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1146
1147 gfs2_trans_add_meta(ip->i_gl, dibh);
1148 gfs2_dinode_out(ip, dibh->b_data);
1149 brelse(dibh);
1150
1151out:
1152 up_write(&ip->i_rw_mutex);
1153 gfs2_trans_end(sdp);
1154 return error;
1155}
1156
1157/**
1158 * do_shrink - make a file smaller
1159 * @inode: the inode
1160 * @oldsize: the current inode size
1161 * @newsize: the size to make the file
1162 *
1163 * Called with an exclusive lock on @inode. The @newsize must
1164 * be equal to or smaller than the current inode size.
1165 *
1166 * Returns: errno
1167 */
1168
1169static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1170{
1171 struct gfs2_inode *ip = GFS2_I(inode);
1172 int error;
1173
1174 error = trunc_start(inode, oldsize, newsize);
1175 if (error < 0)
1176 return error;
1177 if (gfs2_is_stuffed(ip))
1178 return 0;
1179
1180 error = trunc_dealloc(ip, newsize);
1181 if (error == 0)
1182 error = trunc_end(ip);
1183
1184 return error;
1185}
1186
1187void gfs2_trim_blocks(struct inode *inode)
1188{
1189 u64 size = inode->i_size;
1190 int ret;
1191
1192 ret = do_shrink(inode, size, size);
1193 WARN_ON(ret != 0);
1194}
1195
1196/**
1197 * do_grow - Touch and update inode size
1198 * @inode: The inode
1199 * @size: The new size
1200 *
1201 * This function updates the timestamps on the inode and
1202 * may also increase the size of the inode. This function
1203 * must not be called with @size any smaller than the current
1204 * inode size.
1205 *
1206 * Although it is not strictly required to unstuff files here,
1207 * earlier versions of GFS2 have a bug in the stuffed file reading
1208 * code which will result in a buffer overrun if the size is larger
1209 * than the max stuffed file size. In order to prevent this from
1210 * occurring, such files are unstuffed, but in other cases we can
1211 * just update the inode size directly.
1212 *
1213 * Returns: 0 on success, or -ve on error
1214 */
1215
1216static int do_grow(struct inode *inode, u64 size)
1217{
1218 struct gfs2_inode *ip = GFS2_I(inode);
1219 struct gfs2_sbd *sdp = GFS2_SB(inode);
1220 struct gfs2_alloc_parms ap = { .target = 1, };
1221 struct buffer_head *dibh;
1222 int error;
1223 int unstuff = 0;
1224
1225 if (gfs2_is_stuffed(ip) &&
1226 (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
1227 error = gfs2_quota_lock_check(ip, &ap);
1228 if (error)
1229 return error;
1230
1231 error = gfs2_inplace_reserve(ip, &ap);
1232 if (error)
1233 goto do_grow_qunlock;
1234 unstuff = 1;
1235 }
1236
1237 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
1238 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
1239 0 : RES_QUOTA), 0);
1240 if (error)
1241 goto do_grow_release;
1242
1243 if (unstuff) {
1244 error = gfs2_unstuff_dinode(ip, NULL);
1245 if (error)
1246 goto do_end_trans;
1247 }
1248
1249 error = gfs2_meta_inode_buffer(ip, &dibh);
1250 if (error)
1251 goto do_end_trans;
1252
1253 i_size_write(inode, size);
1254 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1255 gfs2_trans_add_meta(ip->i_gl, dibh);
1256 gfs2_dinode_out(ip, dibh->b_data);
1257 brelse(dibh);
1258
1259do_end_trans:
1260 gfs2_trans_end(sdp);
1261do_grow_release:
1262 if (unstuff) {
1263 gfs2_inplace_release(ip);
1264do_grow_qunlock:
1265 gfs2_quota_unlock(ip);
1266 }
1267 return error;
1268}
1269
1270/**
1271 * gfs2_setattr_size - make a file a given size
1272 * @inode: the inode
1273 * @newsize: the size to make the file
1274 *
1275 * The file size can grow, shrink, or stay the same size. This
1276 * is called holding i_mutex and an exclusive glock on the inode
1277 * in question.
1278 *
1279 * Returns: errno
1280 */
1281
1282int gfs2_setattr_size(struct inode *inode, u64 newsize)
1283{
1284 struct gfs2_inode *ip = GFS2_I(inode);
1285 int ret;
1286 u64 oldsize;
1287
1288 BUG_ON(!S_ISREG(inode->i_mode));
1289
1290 ret = inode_newsize_ok(inode, newsize);
1291 if (ret)
1292 return ret;
1293
1294 inode_dio_wait(inode);
1295
1296 ret = gfs2_rsqa_alloc(ip);
1297 if (ret)
1298 goto out;
1299
1300 oldsize = inode->i_size;
1301 if (newsize >= oldsize) {
1302 ret = do_grow(inode, newsize);
1303 goto out;
1304 }
1305
1306 ret = do_shrink(inode, oldsize, newsize);
1307out:
1308 gfs2_rsqa_delete(ip, NULL);
1309 return ret;
1310}
1311
1312int gfs2_truncatei_resume(struct gfs2_inode *ip)
1313{
1314 int error;
1315 error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1316 if (!error)
1317 error = trunc_end(ip);
1318 return error;
1319}
1320
1321int gfs2_file_dealloc(struct gfs2_inode *ip)
1322{
1323 return trunc_dealloc(ip, 0);
1324}
1325
1326/**
1327 * gfs2_free_journal_extents - Free cached journal bmap info
1328 * @jd: The journal
1329 *
1330 */
1331
1332void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
1333{
1334 struct gfs2_journal_extent *jext;
1335
1336 while(!list_empty(&jd->extent_list)) {
1337 jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
1338 list_del(&jext->list);
1339 kfree(jext);
1340 }
1341}
1342
1343/**
1344 * gfs2_add_jextent - Add or merge a new extent to extent cache
1345 * @jd: The journal descriptor
1346 * @lblock: The logical block at start of new extent
1347 * @dblock: The physical block at start of new extent
1348 * @blocks: Size of extent in fs blocks
1349 *
1350 * Returns: 0 on success or -ENOMEM
1351 */
1352
1353static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
1354{
1355 struct gfs2_journal_extent *jext;
1356
1357 if (!list_empty(&jd->extent_list)) {
1358 jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
1359 if ((jext->dblock + jext->blocks) == dblock) {
1360 jext->blocks += blocks;
1361 return 0;
1362 }
1363 }
1364
1365 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
1366 if (jext == NULL)
1367 return -ENOMEM;
1368 jext->dblock = dblock;
1369 jext->lblock = lblock;
1370 jext->blocks = blocks;
1371 list_add_tail(&jext->list, &jd->extent_list);
1372 jd->nr_extents++;
1373 return 0;
1374}
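/*
 * Example: if the journal's last cached extent is {dblock = 1000,
 * blocks = 8} and the next mapping starts at dblock 1008, the two are
 * merged and the existing extent simply grows to blocks = 16; otherwise
 * a new gfs2_journal_extent is appended to jd->extent_list.
 */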
1375
1376/**
1377 * gfs2_map_journal_extents - Cache journal bmap info
1378 * @sdp: The super block
1379 * @jd: The journal to map
1380 *
1381 * Create a reusable "extent" mapping from all logical
1382 * blocks to all physical blocks for the given journal. This will save
1383 * us time when writing journal blocks. Most journals will have only one
1384 * extent that maps all their logical blocks. That's because mkfs.gfs2
1385 * lays the journal blocks out sequentially to maximize performance.
1386 * So one extent would map the first block for the entire file length.
1387 * However, gfs2_jadd can run while other file activity is going on, so
1388 * those journals may not be sequential. Less likely is the case where
1389 * the users created their own journals by mounting the metafs and
1390 * laying it out. But it's still possible. These journals might have
1391 * several extents.
1392 *
1393 * Returns: 0 on success, or error on failure
1394 */
1395
1396int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
1397{
1398 u64 lblock = 0;
1399 u64 lblock_stop;
1400 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1401 struct buffer_head bh;
1402 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
1403 u64 size;
1404 int rc;
1405
1406 lblock_stop = i_size_read(jd->jd_inode) >> shift;
1407 size = (lblock_stop - lblock) << shift;
1408 jd->nr_extents = 0;
1409 WARN_ON(!list_empty(&jd->extent_list));
1410
1411 do {
1412 bh.b_state = 0;
1413 bh.b_blocknr = 0;
1414 bh.b_size = size;
1415 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
1416 if (rc || !buffer_mapped(&bh))
1417 goto fail;
1418 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
1419 if (rc)
1420 goto fail;
1421 size -= bh.b_size;
1422 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1423 } while(size > 0);
1424
1425 fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
1426 jd->nr_extents);
1427 return 0;
1428
1429fail:
1430 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
1431 rc, jd->jd_jid,
1432 (unsigned long long)(i_size_read(jd->jd_inode) - size),
1433 jd->nr_extents);
1434 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
1435 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
1436 bh.b_state, (unsigned long long)bh.b_size);
1437 gfs2_free_journal_extents(jd);
1438 return rc;
1439}
1440
1441/**
1442 * gfs2_write_alloc_required - figure out if a write will require an allocation
1443 * @ip: the file being written to
1444 * @offset: the offset to write to
1445 * @len: the number of bytes being written
1446 *
1447 * Returns: 1 if an alloc is required, 0 otherwise
1448 */
1449
1450int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1451 unsigned int len)
1452{
1453 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1454 struct buffer_head bh;
1455 unsigned int shift;
1456 u64 lblock, lblock_stop, size;
1457 u64 end_of_file;
1458
1459 if (!len)
1460 return 0;
1461
1462 if (gfs2_is_stuffed(ip)) {
1463 if (offset + len >
1464 sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
1465 return 1;
1466 return 0;
1467 }
1468
1469 shift = sdp->sd_sb.sb_bsize_shift;
1470 BUG_ON(gfs2_is_dir(ip));
1471 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
1472 lblock = offset >> shift;
1473 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1474 if (lblock_stop > end_of_file)
1475 return 1;
1476
1477 size = (lblock_stop - lblock) << shift;
1478 do {
1479 bh.b_state = 0;
1480 bh.b_size = size;
1481 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
1482 if (!buffer_mapped(&bh))
1483 return 1;
1484 size -= bh.b_size;
1485 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1486 } while(size > 0);
1487
1488 return 0;
1489}
1490
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 */
6
7#include <linux/spinlock.h>
8#include <linux/completion.h>
9#include <linux/buffer_head.h>
10#include <linux/blkdev.h>
11#include <linux/gfs2_ondisk.h>
12#include <linux/crc32.h>
13#include <linux/iomap.h>
14#include <linux/ktime.h>
15
16#include "gfs2.h"
17#include "incore.h"
18#include "bmap.h"
19#include "glock.h"
20#include "inode.h"
21#include "meta_io.h"
22#include "quota.h"
23#include "rgrp.h"
24#include "log.h"
25#include "super.h"
26#include "trans.h"
27#include "dir.h"
28#include "util.h"
29#include "aops.h"
30#include "trace_gfs2.h"
31
32/* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to
34 * keep it small.
35 */
36struct metapath {
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39 int mp_fheight; /* find_metapath height */
40 int mp_aheight; /* actual height (lookup height) */
41};
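/*
 * mp_fheight is the height that find_metapath() computed for the target
 * block, while mp_aheight records how far down the tree the lookup
 * actually got before hitting an unallocated pointer (see
 * __fillup_metapath() below); the two are equal when the whole path is
 * already allocated.
 */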
42
43static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
44
45/**
46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
47 * @ip: the inode
48 * @dibh: the dinode buffer
49 * @block: the block number that was allocated
50 * @page: The (optional) page. This is looked up if @page is NULL
51 *
52 * Returns: errno
53 */
54
55static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 u64 block, struct page *page)
57{
58 struct inode *inode = &ip->i_inode;
59 struct buffer_head *bh;
60 int release = 0;
61
62 if (!page || page->index) {
63 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
64 if (!page)
65 return -ENOMEM;
66 release = 1;
67 }
68
69 if (!PageUptodate(page)) {
70 void *kaddr = kmap(page);
71 u64 dsize = i_size_read(inode);
72
73 if (dsize > gfs2_max_stuffed_size(ip))
74 dsize = gfs2_max_stuffed_size(ip);
75
76 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
77 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
78 kunmap(page);
79
80 SetPageUptodate(page);
81 }
82
83 if (!page_has_buffers(page))
84 create_empty_buffers(page, BIT(inode->i_blkbits),
85 BIT(BH_Uptodate));
86
87 bh = page_buffers(page);
88
89 if (!buffer_mapped(bh))
90 map_bh(bh, inode->i_sb, block);
91
92 set_buffer_uptodate(bh);
93 if (gfs2_is_jdata(ip))
94 gfs2_trans_add_data(ip->i_gl, bh);
95 else {
96 mark_buffer_dirty(bh);
97 gfs2_ordered_add_inode(ip);
98 }
99
100 if (release) {
101 unlock_page(page);
102 put_page(page);
103 }
104
105 return 0;
106}
107
108/**
109 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
110 * @ip: The GFS2 inode to unstuff
111 * @page: The (optional) page. This is looked up if the @page is NULL
112 *
113 * This routine unstuffs a dinode and returns it to a "normal" state such
114 * that the height can be grown in the traditional way.
115 *
116 * Returns: errno
117 */
118
119int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
120{
121 struct buffer_head *bh, *dibh;
122 struct gfs2_dinode *di;
123 u64 block = 0;
124 int isdir = gfs2_is_dir(ip);
125 int error;
126
127 down_write(&ip->i_rw_mutex);
128
129 error = gfs2_meta_inode_buffer(ip, &dibh);
130 if (error)
131 goto out;
132
133 if (i_size_read(&ip->i_inode)) {
134 /* Get a free block, fill it with the stuffed data,
135 and write it out to disk */
136
137 unsigned int n = 1;
138 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
139 if (error)
140 goto out_brelse;
141 if (isdir) {
142 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
143 error = gfs2_dir_get_new_buffer(ip, block, &bh);
144 if (error)
145 goto out_brelse;
146 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
147 dibh, sizeof(struct gfs2_dinode));
148 brelse(bh);
149 } else {
150 error = gfs2_unstuffer_page(ip, dibh, block, page);
151 if (error)
152 goto out_brelse;
153 }
154 }
155
156 /* Set up the pointer to the new block */
157
158 gfs2_trans_add_meta(ip->i_gl, dibh);
159 di = (struct gfs2_dinode *)dibh->b_data;
160 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
161
162 if (i_size_read(&ip->i_inode)) {
163 *(__be64 *)(di + 1) = cpu_to_be64(block);
164 gfs2_add_inode_blocks(&ip->i_inode, 1);
165 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
166 }
167
168 ip->i_height = 1;
169 di->di_height = cpu_to_be16(1);
170
171out_brelse:
172 brelse(dibh);
173out:
174 up_write(&ip->i_rw_mutex);
175 return error;
176}
177
178
179/**
180 * find_metapath - Find path through the metadata tree
181 * @sdp: The superblock
182 * @block: The disk block to look up
183 * @mp: The metapath to return the result in
184 * @height: The pre-calculated height of the metadata tree
185 *
186 * This routine returns a struct metapath structure that defines a path
187 * through the metadata of inode "ip" to get to block "block".
188 *
189 * Example:
190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
191 * filesystem with a blocksize of 4096.
192 *
193 * find_metapath() would return a struct metapath structure set to:
194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
195 *
196 * That means that in order to get to the block containing the byte at
197 * offset 101342453, we would load the indirect block pointed to by pointer
198 * 0 in the dinode. We would then load the indirect block pointed to by
199 * pointer 48 in that indirect block. We would then load the data block
200 * pointed to by pointer 165 in that indirect block.
201 *
202 * ----------------------------------------
203 * | Dinode | |
204 * | | 4|
205 * | |0 1 2 3 4 5 9|
206 * | | 6|
207 * ----------------------------------------
208 * |
209 * |
210 * V
211 * ----------------------------------------
212 * | Indirect Block |
213 * | 5|
214 * | 4 4 4 4 4 5 5 1|
215 * |0 5 6 7 8 9 0 1 2|
216 * ----------------------------------------
217 * |
218 * |
219 * V
220 * ----------------------------------------
221 * | Indirect Block |
222 * | 1 1 1 1 1 5|
223 * | 6 6 6 6 6 1|
224 * |0 3 4 5 6 7 2|
225 * ----------------------------------------
226 * |
227 * |
228 * V
229 * ----------------------------------------
230 * | Data block containing offset |
231 * | 101342453 |
232 * | |
233 * | |
234 * ----------------------------------------
235 *
236 */
237
238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 struct metapath *mp, unsigned int height)
240{
241 unsigned int i;
242
243 mp->mp_fheight = height;
244 for (i = height; i--;)
245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
246}
247
248static inline unsigned int metapath_branch_start(const struct metapath *mp)
249{
250 if (mp->mp_list[0] == 0)
251 return 2;
252 return 1;
253}
254
255/**
256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257 * @height: The metadata height (0 = dinode)
258 * @mp: The metapath
259 */
260static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
261{
262 struct buffer_head *bh = mp->mp_bh[height];
263 if (height == 0)
264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
266}
267
268/**
269 * metapointer - Return pointer to start of metadata in a buffer
270 * @height: The metadata height (0 = dinode)
271 * @mp: The metapath
272 *
273 * Return a pointer to the block number of the next height of the metadata
274 * tree given a buffer containing the pointer to the current height of the
275 * metadata tree.
276 */
277
278static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
279{
280 __be64 *p = metaptr1(height, mp);
281 return p + mp->mp_list[height];
282}
283
284static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
285{
286 const struct buffer_head *bh = mp->mp_bh[height];
287 return (const __be64 *)(bh->b_data + bh->b_size);
288}
289
290static void clone_metapath(struct metapath *clone, struct metapath *mp)
291{
292 unsigned int hgt;
293
294 *clone = *mp;
295 for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 get_bh(clone->mp_bh[hgt]);
297}
298
299static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
300{
301 const __be64 *t;
302
303 for (t = start; t < end; t++) {
304 struct buffer_head *rabh;
305
306 if (!*t)
307 continue;
308
309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 if (trylock_buffer(rabh)) {
311 if (!buffer_uptodate(rabh)) {
312 rabh->b_end_io = end_buffer_read_sync;
313 submit_bh(REQ_OP_READ,
314 REQ_RAHEAD | REQ_META | REQ_PRIO,
315 rabh);
316 continue;
317 }
318 unlock_buffer(rabh);
319 }
320 brelse(rabh);
321 }
322}
323
324static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 unsigned int x, unsigned int h)
326{
327 for (; x < h; x++) {
328 __be64 *ptr = metapointer(x, mp);
329 u64 dblock = be64_to_cpu(*ptr);
330 int ret;
331
332 if (!dblock)
333 break;
334 ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
335 if (ret)
336 return ret;
337 }
338 mp->mp_aheight = x + 1;
339 return 0;
340}
341
342/**
343 * lookup_metapath - Walk the metadata tree to a specific point
344 * @ip: The inode
345 * @mp: The metapath
346 *
347 * Assumes that the inode's buffer has already been looked up and
348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349 * by find_metapath().
350 *
351 * If this function encounters part of the tree which has not been
352 * allocated, it returns the current height of the tree at the point
353 * at which it found the unallocated block. Blocks which are found are
354 * added to the mp->mp_bh[] list.
355 *
356 * Returns: error
357 */
358
359static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
360{
361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
362}
363
364/**
365 * fillup_metapath - fill up buffers for the metadata path to a specific height
366 * @ip: The inode
367 * @mp: The metapath
368 * @h: The height to which it should be mapped
369 *
370 * Similar to lookup_metapath, but does lookups for a range of heights
371 *
372 * Returns: error or the number of buffers filled
373 */
374
375static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
376{
377 unsigned int x = 0;
378 int ret;
379
380 if (h) {
381 /* find the first buffer we need to look up. */
382 for (x = h - 1; x > 0; x--) {
383 if (mp->mp_bh[x])
384 break;
385 }
386 }
387 ret = __fillup_metapath(ip, mp, x, h);
388 if (ret)
389 return ret;
390 return mp->mp_aheight - x - 1;
391}
392
393static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
394{
395 sector_t factor = 1, block = 0;
396 int hgt;
397
398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
399 if (hgt < mp->mp_aheight)
400 block += mp->mp_list[hgt] * factor;
401 factor *= sdp->sd_inptrs;
402 }
403 return block;
404}
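/*
 * metapath_to_block() is roughly the inverse of find_metapath(): reusing
 * the example above (sd_inptrs == 512), mp_list = {0, 48, 165} gives
 * 0 * 512 * 512 + 48 * 512 + 165 = logical block 24741.
 */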
405
406static void release_metapath(struct metapath *mp)
407{
408 int i;
409
410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
411 if (mp->mp_bh[i] == NULL)
412 break;
413 brelse(mp->mp_bh[i]);
414 mp->mp_bh[i] = NULL;
415 }
416}
417
418/**
419 * gfs2_extent_length - Returns length of an extent of blocks
420 * @bh: The metadata block
421 * @ptr: Current position in @bh
422 * @limit: Max extent length to return
423 * @eob: Set to 1 if we hit "end of block"
424 *
425 * Returns: The length of the extent (minimum of one block)
426 */
427
428static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
429{
430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
431 const __be64 *first = ptr;
432 u64 d = be64_to_cpu(*ptr);
433
434 *eob = 0;
435 do {
436 ptr++;
437 if (ptr >= end)
438 break;
439 d++;
440 } while(be64_to_cpu(*ptr) == d);
441 if (ptr >= end)
442 *eob = 1;
443 return ptr - first;
444}
445
446enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
447
448/*
449 * gfs2_metadata_walker - walk an indirect block
450 * @mp: Metapath to indirect block
451 * @ptrs: Number of pointers to look at
452 *
453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
454 * indirect block to follow.
455 */
456typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
457 unsigned int ptrs);
458
459/*
460 * gfs2_walk_metadata - walk a tree of indirect blocks
461 * @inode: The inode
462 * @mp: Starting point of walk
463 * @max_len: Maximum number of blocks to walk
464 * @walker: Called during the walk
465 *
466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
467 * past the end of metadata, and a negative error code otherwise.
468 */
469
470static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
471 u64 max_len, gfs2_metadata_walker walker)
472{
473 struct gfs2_inode *ip = GFS2_I(inode);
474 struct gfs2_sbd *sdp = GFS2_SB(inode);
475 u64 factor = 1;
476 unsigned int hgt;
477 int ret;
478
479 /*
480 * The walk starts in the lowest allocated indirect block, which may be
481 * before the position indicated by @mp. Adjust @max_len accordingly
482 * to avoid a short walk.
483 */
484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
485 max_len += mp->mp_list[hgt] * factor;
486 mp->mp_list[hgt] = 0;
487 factor *= sdp->sd_inptrs;
488 }
489
490 for (;;) {
491 u16 start = mp->mp_list[hgt];
492 enum walker_status status;
493 unsigned int ptrs;
494 u64 len;
495
496 /* Walk indirect block. */
497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
498 len = ptrs * factor;
499 if (len > max_len)
500 ptrs = DIV_ROUND_UP_ULL(max_len, factor);
501 status = walker(mp, ptrs);
502 switch (status) {
503 case WALK_STOP:
504 return 1;
505 case WALK_FOLLOW:
506 BUG_ON(mp->mp_aheight == mp->mp_fheight);
507 ptrs = mp->mp_list[hgt] - start;
508 len = ptrs * factor;
509 break;
510 case WALK_CONTINUE:
511 break;
512 }
513 if (len >= max_len)
514 break;
515 max_len -= len;
516 if (status == WALK_FOLLOW)
517 goto fill_up_metapath;
518
519lower_metapath:
520 /* Decrease height of metapath. */
521 brelse(mp->mp_bh[hgt]);
522 mp->mp_bh[hgt] = NULL;
523 mp->mp_list[hgt] = 0;
524 if (!hgt)
525 break;
526 hgt--;
527 factor *= sdp->sd_inptrs;
528
529 /* Advance in metadata tree. */
530 (mp->mp_list[hgt])++;
531 if (hgt) {
532 if (mp->mp_list[hgt] >= sdp->sd_inptrs)
533 goto lower_metapath;
534 } else {
535 if (mp->mp_list[hgt] >= sdp->sd_diptrs)
536 break;
537 }
538
539fill_up_metapath:
540 /* Increase height of metapath. */
541 ret = fillup_metapath(ip, mp, ip->i_height - 1);
542 if (ret < 0)
543 return ret;
544 hgt += ret;
545 for (; ret; ret--)
546 do_div(factor, sdp->sd_inptrs);
547 mp->mp_aheight = hgt + 1;
548 }
549 return 0;
550}
551
552static enum walker_status gfs2_hole_walker(struct metapath *mp,
553 unsigned int ptrs)
554{
555 const __be64 *start, *ptr, *end;
556 unsigned int hgt;
557
558 hgt = mp->mp_aheight - 1;
559 start = metapointer(hgt, mp);
560 end = start + ptrs;
561
562 for (ptr = start; ptr < end; ptr++) {
563 if (*ptr) {
564 mp->mp_list[hgt] += ptr - start;
565 if (mp->mp_aheight == mp->mp_fheight)
566 return WALK_STOP;
567 return WALK_FOLLOW;
568 }
569 }
570 return WALK_CONTINUE;
571}
572
573/**
574 * gfs2_hole_size - figure out the size of a hole
575 * @inode: The inode
576 * @lblock: The logical starting block number
577 * @len: How far to look (in blocks)
578 * @mp: The metapath at lblock
579 * @iomap: The iomap to store the hole size in
580 *
581 * This function modifies @mp.
582 *
583 * Returns: errno on error
584 */
585static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
586 struct metapath *mp, struct iomap *iomap)
587{
588 struct metapath clone;
589 u64 hole_size;
590 int ret;
591
592 clone_metapath(&clone, mp);
593 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
594 if (ret < 0)
595 goto out;
596
597 if (ret == 1)
598 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
599 else
600 hole_size = len;
601 iomap->length = hole_size << inode->i_blkbits;
602 ret = 0;
603
604out:
605 release_metapath(&clone);
606 return ret;
607}
608
609static inline __be64 *gfs2_indirect_init(struct metapath *mp,
610 struct gfs2_glock *gl, unsigned int i,
611 unsigned offset, u64 bn)
612{
613 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
614 ((i > 1) ? sizeof(struct gfs2_meta_header) :
615 sizeof(struct gfs2_dinode)));
616 BUG_ON(i < 1);
617 BUG_ON(mp->mp_bh[i] != NULL);
618 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
619 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
620 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
621 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
622 ptr += offset;
623 *ptr = cpu_to_be64(bn);
624 return ptr;
625}
626
627enum alloc_state {
628 ALLOC_DATA = 0,
629 ALLOC_GROW_DEPTH = 1,
630 ALLOC_GROW_HEIGHT = 2,
631 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
632};
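
/*
 * A sketch of how gfs2_iomap_alloc() below typically moves through these
 * states for a write that requires growing an unstuffed inode from height 1
 * to height 3 (an assumed example, not the only possible sequence):
 *
 *	ALLOC_GROW_HEIGHT:  allocate new indirect blocks above the current
 *			    tree and move the dinode's old pointers under them
 *	ALLOC_GROW_DEPTH:   allocate the missing indirect blocks down the new
 *			    branch towards the leaf
 *	ALLOC_DATA:         allocate the data blocks and write their pointers
 *			    into the leaf
 *
 * Each gfs2_alloc_blocks() call may return fewer blocks than requested, in
 * which case the do/while loop simply asks again for the remainder and
 * resumes in whichever state it left off.
 */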
633
634/**
635 * gfs2_iomap_alloc - Build a metadata tree of the requested height
636 * @inode: The GFS2 inode
637 * @iomap: The iomap structure
638 * @mp: The metapath, with proper height information calculated
639 *
640 * In this routine we may have to alloc:
641 * i) Indirect blocks to grow the metadata tree height
642 * ii) Indirect blocks to fill in lower part of the metadata tree
643 * iii) Data blocks
644 *
645 * This function is called after gfs2_iomap_get, which works out the
646 * total number of blocks we need via gfs2_alloc_size.
647 *
648 * We then do the actual allocation, asking for one extent at a time (if
649 * enough contiguous free blocks are available, there will only be one
650 * allocation request per call), and use the state machine to initialise
651 * the blocks in order.
652 *
653 * Right now, this function will allocate at most one indirect block
654 * worth of data -- with a default block size of 4K, that's slightly
655 * less than 2M. If this limitation is ever removed to allow huge
656 * allocations, we would probably still want to limit the iomap size we
657 * return to avoid stalling other tasks during huge writes; the next
658 * iomap iteration would then find the blocks already allocated.
659 *
660 * Returns: errno on error
661 */
662
663static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
664 struct metapath *mp)
665{
666 struct gfs2_inode *ip = GFS2_I(inode);
667 struct gfs2_sbd *sdp = GFS2_SB(inode);
668 struct buffer_head *dibh = mp->mp_bh[0];
669 u64 bn;
670 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
671 size_t dblks = iomap->length >> inode->i_blkbits;
672 const unsigned end_of_metadata = mp->mp_fheight - 1;
673 int ret;
674 enum alloc_state state;
675 __be64 *ptr;
676 __be64 zero_bn = 0;
677
678 BUG_ON(mp->mp_aheight < 1);
679 BUG_ON(dibh == NULL);
680 BUG_ON(dblks < 1);
681
682 gfs2_trans_add_meta(ip->i_gl, dibh);
683
684 down_write(&ip->i_rw_mutex);
685
686 if (mp->mp_fheight == mp->mp_aheight) {
687 /* Bottom indirect block exists */
688 state = ALLOC_DATA;
689 } else {
690 /* Need to allocate indirect blocks */
691 if (mp->mp_fheight == ip->i_height) {
692 /* Writing into existing tree, extend tree down */
693 iblks = mp->mp_fheight - mp->mp_aheight;
694 state = ALLOC_GROW_DEPTH;
695 } else {
696 /* Building up tree height */
697 state = ALLOC_GROW_HEIGHT;
698 iblks = mp->mp_fheight - ip->i_height;
699 branch_start = metapath_branch_start(mp);
700 iblks += (mp->mp_fheight - branch_start);
701 }
702 }
703
704 /* start of the second part of the function (state machine) */
705
706 blks = dblks + iblks;
707 i = mp->mp_aheight;
708 do {
709 n = blks - alloced;
710 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
711 if (ret)
712 goto out;
713 alloced += n;
714 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
715 gfs2_trans_remove_revoke(sdp, bn, n);
716 switch (state) {
717 /* Growing height of tree */
718 case ALLOC_GROW_HEIGHT:
719 if (i == 1) {
720 ptr = (__be64 *)(dibh->b_data +
721 sizeof(struct gfs2_dinode));
722 zero_bn = *ptr;
723 }
724 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
725 i++, n--)
726 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
727 if (i - 1 == mp->mp_fheight - ip->i_height) {
728 i--;
729 gfs2_buffer_copy_tail(mp->mp_bh[i],
730 sizeof(struct gfs2_meta_header),
731 dibh, sizeof(struct gfs2_dinode));
732 gfs2_buffer_clear_tail(dibh,
733 sizeof(struct gfs2_dinode) +
734 sizeof(__be64));
735 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
736 sizeof(struct gfs2_meta_header));
737 *ptr = zero_bn;
738 state = ALLOC_GROW_DEPTH;
739 for(i = branch_start; i < mp->mp_fheight; i++) {
740 if (mp->mp_bh[i] == NULL)
741 break;
742 brelse(mp->mp_bh[i]);
743 mp->mp_bh[i] = NULL;
744 }
745 i = branch_start;
746 }
747 if (n == 0)
748 break;
749 fallthrough; /* To branching from existing tree */
750 case ALLOC_GROW_DEPTH:
751 if (i > 1 && i < mp->mp_fheight)
752 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
753 for (; i < mp->mp_fheight && n > 0; i++, n--)
754 gfs2_indirect_init(mp, ip->i_gl, i,
755 mp->mp_list[i-1], bn++);
756 if (i == mp->mp_fheight)
757 state = ALLOC_DATA;
758 if (n == 0)
759 break;
760 fallthrough; /* To tree complete, adding data blocks */
761 case ALLOC_DATA:
762 BUG_ON(n > dblks);
763 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
764 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
765 dblks = n;
766 ptr = metapointer(end_of_metadata, mp);
767 iomap->addr = bn << inode->i_blkbits;
768 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
769 while (n-- > 0)
770 *ptr++ = cpu_to_be64(bn++);
771 break;
772 }
773 } while (iomap->addr == IOMAP_NULL_ADDR);
774
775 iomap->type = IOMAP_MAPPED;
776 iomap->length = (u64)dblks << inode->i_blkbits;
777 ip->i_height = mp->mp_fheight;
778 gfs2_add_inode_blocks(&ip->i_inode, alloced);
779 gfs2_dinode_out(ip, dibh->b_data);
780out:
781 up_write(&ip->i_rw_mutex);
782 return ret;
783}
784
785#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
786
787/**
788 * gfs2_alloc_size - Compute the maximum allocation size
789 * @inode: The inode
790 * @mp: The metapath
791 * @size: Requested size in blocks
792 *
793 * Compute the maximum size of the next allocation at @mp.
794 *
795 * Returns: size in blocks
796 */
797static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
798{
799 struct gfs2_inode *ip = GFS2_I(inode);
800 struct gfs2_sbd *sdp = GFS2_SB(inode);
801 const __be64 *first, *ptr, *end;
802
803 /*
804 * For writes to stuffed files, this function is called twice via
805 * gfs2_iomap_get, before and after unstuffing. The size we return the
806 * first time needs to be large enough to get the reservation and
807 * allocation sizes right. The size we return the second time must
808 * be exact or else gfs2_iomap_alloc won't do the right thing.
809 */
810
811 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
812 unsigned int maxsize = mp->mp_fheight > 1 ?
813 sdp->sd_inptrs : sdp->sd_diptrs;
814 maxsize -= mp->mp_list[mp->mp_fheight - 1];
815 if (size > maxsize)
816 size = maxsize;
817 return size;
818 }
819
820 first = metapointer(ip->i_height - 1, mp);
821 end = metaend(ip->i_height - 1, mp);
822 if (end - first > size)
823 end = first + size;
824 for (ptr = first; ptr < end; ptr++) {
825 if (*ptr)
826 break;
827 }
828 return ptr - first;
829}
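
/*
 * Illustrative example (numbers assumed; with 4KiB blocks an indirect block
 * holds 509 pointers): for a write landing in an unallocated region whose
 * leaf-level index is 500, a request of size 100 is capped above to
 * 509 - 500 = 9 blocks, so an allocation never crosses into the next
 * indirect block.  Once the leaf exists, the loop above instead counts the
 * run of zero pointers starting at @mp, so the size returned on the second
 * call matches exactly what gfs2_iomap_alloc() will fill in.
 */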
830
831/**
832 * gfs2_iomap_get - Map blocks from an inode to disk blocks
833 * @inode: The inode
834 * @pos: Starting position in bytes
835 * @length: Length to map, in bytes
836 * @flags: iomap flags
837 * @iomap: The iomap structure
838 * @mp: The metapath
839 *
840 * Returns: errno
841 */
842static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
843 unsigned flags, struct iomap *iomap,
844 struct metapath *mp)
845{
846 struct gfs2_inode *ip = GFS2_I(inode);
847 struct gfs2_sbd *sdp = GFS2_SB(inode);
848 loff_t size = i_size_read(inode);
849 __be64 *ptr;
850 sector_t lblock;
851 sector_t lblock_stop;
852 int ret;
853 int eob;
854 u64 len;
855 struct buffer_head *dibh = NULL, *bh;
856 u8 height;
857
858 if (!length)
859 return -EINVAL;
860
861 down_read(&ip->i_rw_mutex);
862
863 ret = gfs2_meta_inode_buffer(ip, &dibh);
864 if (ret)
865 goto unlock;
866 mp->mp_bh[0] = dibh;
867
868 if (gfs2_is_stuffed(ip)) {
869 if (flags & IOMAP_WRITE) {
870 loff_t max_size = gfs2_max_stuffed_size(ip);
871
872 if (pos + length > max_size)
873 goto unstuff;
874 iomap->length = max_size;
875 } else {
876 if (pos >= size) {
877 if (flags & IOMAP_REPORT) {
878 ret = -ENOENT;
879 goto unlock;
880 } else {
881 iomap->offset = pos;
882 iomap->length = length;
883 goto hole_found;
884 }
885 }
886 iomap->length = size;
887 }
888 iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
889 sizeof(struct gfs2_dinode);
890 iomap->type = IOMAP_INLINE;
891 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
892 goto out;
893 }
894
895unstuff:
896 lblock = pos >> inode->i_blkbits;
897 iomap->offset = lblock << inode->i_blkbits;
898 lblock_stop = (pos + length - 1) >> inode->i_blkbits;
899 len = lblock_stop - lblock + 1;
900 iomap->length = len << inode->i_blkbits;
901
902 height = ip->i_height;
903 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
904 height++;
905 find_metapath(sdp, lblock, mp, height);
906 if (height > ip->i_height || gfs2_is_stuffed(ip))
907 goto do_alloc;
908
909 ret = lookup_metapath(ip, mp);
910 if (ret)
911 goto unlock;
912
913 if (mp->mp_aheight != ip->i_height)
914 goto do_alloc;
915
916 ptr = metapointer(ip->i_height - 1, mp);
917 if (*ptr == 0)
918 goto do_alloc;
919
920 bh = mp->mp_bh[ip->i_height - 1];
921 len = gfs2_extent_length(bh, ptr, len, &eob);
922
923 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
924 iomap->length = len << inode->i_blkbits;
925 iomap->type = IOMAP_MAPPED;
926 iomap->flags |= IOMAP_F_MERGED;
927 if (eob)
928 iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
929
930out:
931 iomap->bdev = inode->i_sb->s_bdev;
932unlock:
933 up_read(&ip->i_rw_mutex);
934 return ret;
935
936do_alloc:
937 if (flags & IOMAP_REPORT) {
938 if (pos >= size)
939 ret = -ENOENT;
940 else if (height == ip->i_height)
941 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
942 else
943 iomap->length = size - pos;
944 } else if (flags & IOMAP_WRITE) {
945 u64 alloc_size;
946
947 if (flags & IOMAP_DIRECT)
948 goto out; /* (see gfs2_file_direct_write) */
949
950 len = gfs2_alloc_size(inode, mp, len);
951 alloc_size = len << inode->i_blkbits;
952 if (alloc_size < iomap->length)
953 iomap->length = alloc_size;
954 } else {
955 if (pos < size && height == ip->i_height)
956 ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
957 }
958hole_found:
959 iomap->addr = IOMAP_NULL_ADDR;
960 iomap->type = IOMAP_HOLE;
961 goto out;
962}
963
964/**
965 * gfs2_lblk_to_dblk - convert logical block to disk block
966 * @inode: the inode of the file we're mapping
967 * @lblock: the block relative to the start of the file
968 * @dblock: the returned dblock, if no error
969 *
970 * This function maps a single block from a file logical block (relative to
971 * the start of the file) to a file system absolute block using iomap.
972 *
973 * Returns: 0 on success, with the absolute block stored in @dblock, or errno on error
974 */
975int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
976{
977 struct iomap iomap = { };
978 struct metapath mp = { .mp_aheight = 1, };
979 loff_t pos = (loff_t)lblock << inode->i_blkbits;
980 int ret;
981
982 ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
983 release_metapath(&mp);
984 if (ret == 0)
985 *dblock = iomap.addr >> inode->i_blkbits;
986
987 return ret;
988}
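
/*
 * Minimal usage sketch (hypothetical caller):
 *
 *	u64 dblock;
 *	int error = gfs2_lblk_to_dblk(inode, 1000, &dblock);
 *
 *	if (!error)
 *		pr_info("lblock 1000 -> dblock %llu\n",
 *			(unsigned long long)dblock);
 *
 * A hole maps to IOMAP_NULL_ADDR, so the shifted result is only meaningful
 * for logical blocks that are known to be allocated.
 */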
989
990static int gfs2_write_lock(struct inode *inode)
991{
992 struct gfs2_inode *ip = GFS2_I(inode);
993 struct gfs2_sbd *sdp = GFS2_SB(inode);
994 int error;
995
996 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
997 error = gfs2_glock_nq(&ip->i_gh);
998 if (error)
999 goto out_uninit;
1000 if (&ip->i_inode == sdp->sd_rindex) {
1001 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1002
1003 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1004 GL_NOCACHE, &m_ip->i_gh);
1005 if (error)
1006 goto out_unlock;
1007 }
1008 return 0;
1009
1010out_unlock:
1011 gfs2_glock_dq(&ip->i_gh);
1012out_uninit:
1013 gfs2_holder_uninit(&ip->i_gh);
1014 return error;
1015}
1016
1017static void gfs2_write_unlock(struct inode *inode)
1018{
1019 struct gfs2_inode *ip = GFS2_I(inode);
1020 struct gfs2_sbd *sdp = GFS2_SB(inode);
1021
1022 if (&ip->i_inode == sdp->sd_rindex) {
1023 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1024
1025 gfs2_glock_dq_uninit(&m_ip->i_gh);
1026 }
1027 gfs2_glock_dq_uninit(&ip->i_gh);
1028}
1029
1030static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1031 unsigned len, struct iomap *iomap)
1032{
1033 unsigned int blockmask = i_blocksize(inode) - 1;
1034 struct gfs2_sbd *sdp = GFS2_SB(inode);
1035 unsigned int blocks;
1036
1037 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
1038 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
1039}
1040
1041static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1042 unsigned copied, struct page *page,
1043 struct iomap *iomap)
1044{
1045 struct gfs2_trans *tr = current->journal_info;
1046 struct gfs2_inode *ip = GFS2_I(inode);
1047 struct gfs2_sbd *sdp = GFS2_SB(inode);
1048
1049 if (page && !gfs2_is_stuffed(ip))
1050 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1051
1052 if (tr->tr_num_buf_new)
1053 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1054
1055 gfs2_trans_end(sdp);
1056}
1057
1058static const struct iomap_page_ops gfs2_iomap_page_ops = {
1059 .page_prepare = gfs2_iomap_page_prepare,
1060 .page_done = gfs2_iomap_page_done,
1061};
1062
1063static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1064 loff_t length, unsigned flags,
1065 struct iomap *iomap,
1066 struct metapath *mp)
1067{
1068 struct gfs2_inode *ip = GFS2_I(inode);
1069 struct gfs2_sbd *sdp = GFS2_SB(inode);
1070 bool unstuff;
1071 int ret;
1072
1073 unstuff = gfs2_is_stuffed(ip) &&
1074 pos + length > gfs2_max_stuffed_size(ip);
1075
1076 if (unstuff || iomap->type == IOMAP_HOLE) {
1077 unsigned int data_blocks, ind_blocks;
1078 struct gfs2_alloc_parms ap = {};
1079 unsigned int rblocks;
1080 struct gfs2_trans *tr;
1081
1082 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1083 &ind_blocks);
1084 ap.target = data_blocks + ind_blocks;
1085 ret = gfs2_quota_lock_check(ip, &ap);
1086 if (ret)
1087 return ret;
1088
1089 ret = gfs2_inplace_reserve(ip, &ap);
1090 if (ret)
1091 goto out_qunlock;
1092
1093 rblocks = RES_DINODE + ind_blocks;
1094 if (gfs2_is_jdata(ip))
1095 rblocks += data_blocks;
1096 if (ind_blocks || data_blocks)
1097 rblocks += RES_STATFS + RES_QUOTA;
1098 if (inode == sdp->sd_rindex)
1099 rblocks += 2 * RES_STATFS;
1100 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1101
1102 ret = gfs2_trans_begin(sdp, rblocks,
1103 iomap->length >> inode->i_blkbits);
1104 if (ret)
1105 goto out_trans_fail;
1106
1107 if (unstuff) {
1108 ret = gfs2_unstuff_dinode(ip, NULL);
1109 if (ret)
1110 goto out_trans_end;
1111 release_metapath(mp);
1112 ret = gfs2_iomap_get(inode, iomap->offset,
1113 iomap->length, flags, iomap, mp);
1114 if (ret)
1115 goto out_trans_end;
1116 }
1117
1118 if (iomap->type == IOMAP_HOLE) {
1119 ret = gfs2_iomap_alloc(inode, iomap, mp);
1120 if (ret) {
1121 gfs2_trans_end(sdp);
1122 gfs2_inplace_release(ip);
1123 punch_hole(ip, iomap->offset, iomap->length);
1124 goto out_qunlock;
1125 }
1126 }
1127
1128 tr = current->journal_info;
1129 if (tr->tr_num_buf_new)
1130 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1131
1132 gfs2_trans_end(sdp);
1133 }
1134
1135 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1136 iomap->page_ops = &gfs2_iomap_page_ops;
1137 return 0;
1138
1139out_trans_end:
1140 gfs2_trans_end(sdp);
1141out_trans_fail:
1142 gfs2_inplace_release(ip);
1143out_qunlock:
1144 gfs2_quota_unlock(ip);
1145 return ret;
1146}
1147
1148static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1149{
1150 return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1151}
1152
1153static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1154 unsigned flags, struct iomap *iomap,
1155 struct iomap *srcmap)
1156{
1157 struct gfs2_inode *ip = GFS2_I(inode);
1158 struct metapath mp = { .mp_aheight = 1, };
1159 int ret;
1160
1161 iomap->flags |= IOMAP_F_BUFFER_HEAD;
1162
1163 trace_gfs2_iomap_start(ip, pos, length, flags);
1164 if (gfs2_iomap_need_write_lock(flags)) {
1165 ret = gfs2_write_lock(inode);
1166 if (ret)
1167 goto out;
1168 }
1169
1170 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1171 if (ret)
1172 goto out_unlock;
1173
1174 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1175 case IOMAP_WRITE:
1176 if (flags & IOMAP_DIRECT) {
1177 /*
1178 * Silently fall back to buffered I/O for stuffed files
1179 * or if we've got a hole (see gfs2_file_direct_write).
1180 */
1181 if (iomap->type != IOMAP_MAPPED)
1182 ret = -ENOTBLK;
1183 goto out_unlock;
1184 }
1185 break;
1186 case IOMAP_ZERO:
1187 if (iomap->type == IOMAP_HOLE)
1188 goto out_unlock;
1189 break;
1190 default:
1191 goto out_unlock;
1192 }
1193
1194 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1195
1196out_unlock:
1197 if (ret && gfs2_iomap_need_write_lock(flags))
1198 gfs2_write_unlock(inode);
1199 release_metapath(&mp);
1200out:
1201 trace_gfs2_iomap_end(ip, iomap, ret);
1202 return ret;
1203}
1204
1205static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1206 ssize_t written, unsigned flags, struct iomap *iomap)
1207{
1208 struct gfs2_inode *ip = GFS2_I(inode);
1209 struct gfs2_sbd *sdp = GFS2_SB(inode);
1210
1211 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1212 case IOMAP_WRITE:
1213 if (flags & IOMAP_DIRECT)
1214 return 0;
1215 break;
1216 case IOMAP_ZERO:
1217 if (iomap->type == IOMAP_HOLE)
1218 return 0;
1219 break;
1220 default:
1221 return 0;
1222 }
1223
1224 if (!gfs2_is_stuffed(ip))
1225 gfs2_ordered_add_inode(ip);
1226
1227 if (inode == sdp->sd_rindex)
1228 adjust_fs_space(inode);
1229
1230 gfs2_inplace_release(ip);
1231
1232 if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1233 /* Deallocate blocks that were just allocated. */
1234 loff_t blockmask = i_blocksize(inode) - 1;
1235 loff_t end = (pos + length) & ~blockmask;
1236
1237 pos = (pos + written + blockmask) & ~blockmask;
1238 if (pos < end) {
1239 truncate_pagecache_range(inode, pos, end - 1);
1240 punch_hole(ip, pos, end - pos);
1241 }
1242 }
1243
1244 if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1245 gfs2_quota_unlock(ip);
1246
1247 if (unlikely(!written))
1248 goto out_unlock;
1249
1250 if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1251 mark_inode_dirty(inode);
1252 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1253
1254out_unlock:
1255 if (gfs2_iomap_need_write_lock(flags))
1256 gfs2_write_unlock(inode);
1257 return 0;
1258}
1259
1260const struct iomap_ops gfs2_iomap_ops = {
1261 .iomap_begin = gfs2_iomap_begin,
1262 .iomap_end = gfs2_iomap_end,
1263};
1264
1265/**
1266 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1267 * @inode: The inode
1268 * @lblock: The logical block number
1269 * @bh_map: The bh to be mapped
1270 * @create: True if it's ok to alloc blocks to satisfy the request
1271 *
1272 * The size of the requested mapping is defined in bh_map->b_size.
1273 *
1274 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1275 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
1276 * bh_map->b_size to indicate the size of the mapping when @lblock and
1277 * successive blocks are mapped, up to the requested size.
1278 *
1279 * Sets buffer_boundary() if a read of metadata will be required
1280 * before the next block can be mapped. Sets buffer_new() if new
1281 * blocks were allocated.
1282 *
1283 * Returns: errno
1284 */
1285
1286int gfs2_block_map(struct inode *inode, sector_t lblock,
1287 struct buffer_head *bh_map, int create)
1288{
1289 struct gfs2_inode *ip = GFS2_I(inode);
1290 loff_t pos = (loff_t)lblock << inode->i_blkbits;
1291 loff_t length = bh_map->b_size;
1292 struct metapath mp = { .mp_aheight = 1, };
1293 struct iomap iomap = { };
1294 int ret;
1295
1296 clear_buffer_mapped(bh_map);
1297 clear_buffer_new(bh_map);
1298 clear_buffer_boundary(bh_map);
1299 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1300
1301 if (create) {
1302 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1303 if (!ret && iomap.type == IOMAP_HOLE)
1304 ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1305 release_metapath(&mp);
1306 } else {
1307 ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1308 release_metapath(&mp);
1309 }
1310 if (ret)
1311 goto out;
1312
1313 if (iomap.length > bh_map->b_size) {
1314 iomap.length = bh_map->b_size;
1315 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1316 }
1317 if (iomap.addr != IOMAP_NULL_ADDR)
1318 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1319 bh_map->b_size = iomap.length;
1320 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1321 set_buffer_boundary(bh_map);
1322 if (iomap.flags & IOMAP_F_NEW)
1323 set_buffer_new(bh_map);
1324
1325out:
1326 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1327 return ret;
1328}
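
/*
 * Minimal usage sketch (hypothetical caller): map up to 32 blocks starting
 * at lblock and read back how much was actually mapped:
 *
 *	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 *	unsigned int extlen;
 *	int error;
 *
 *	bh.b_size = 32 << inode->i_blkbits;
 *	error = gfs2_block_map(inode, lblock, &bh, 0);
 *	extlen = bh.b_size >> inode->i_blkbits;
 *
 * gfs2_extent_map() and gfs2_map_journal_extents() below follow this
 * pattern.
 */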
1329
1330/*
1331 * Deprecated: do not use in new code
1332 */
1333int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1334{
1335 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1336 int ret;
1337 int create = *new;
1338
1339 BUG_ON(!extlen);
1340 BUG_ON(!dblock);
1341 BUG_ON(!new);
1342
1343 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1344 ret = gfs2_block_map(inode, lblock, &bh, create);
1345 *extlen = bh.b_size >> inode->i_blkbits;
1346 *dblock = bh.b_blocknr;
1347 if (buffer_new(&bh))
1348 *new = 1;
1349 else
1350 *new = 0;
1351 return ret;
1352}
1353
1354/*
1355 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1356 * uses iomap write to perform its actions, which begin their own transactions
1357 * (iomap_begin, page_prepare, etc.)
1358 */
1359static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1360 unsigned int length)
1361{
1362 BUG_ON(current->journal_info);
1363 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1364}
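
/*
 * The usual call pattern (see trunc_start() below) is therefore to zero any
 * partial block first, while no transaction is open, and only then begin the
 * transaction for the metadata updates:
 *
 *	error = gfs2_block_zero_range(inode, newsize, blocksize - offs);
 *	if (error)
 *		return error;
 *	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 */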
1365
1366#define GFS2_JTRUNC_REVOKES 8192
1367
1368/**
1369 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1370 * @inode: The inode being truncated
1371 * @oldsize: The original (larger) size
1372 * @newsize: The new smaller size
1373 *
1374 * With jdata files, we have to journal a revoke for each block which is
1375 * truncated. As a result, we need to split this into separate transactions
1376 * if the number of pages being truncated gets too large.
1377 */
1378
1379static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1380{
1381 struct gfs2_sbd *sdp = GFS2_SB(inode);
1382 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1383 u64 chunk;
1384 int error;
1385
1386 while (oldsize != newsize) {
1387 struct gfs2_trans *tr;
1388 unsigned int offs;
1389
1390 chunk = oldsize - newsize;
1391 if (chunk > max_chunk)
1392 chunk = max_chunk;
1393
1394 offs = oldsize & ~PAGE_MASK;
1395 if (offs && chunk > PAGE_SIZE)
1396 chunk = offs + ((chunk - offs) & PAGE_MASK);
1397
1398 truncate_pagecache(inode, oldsize - chunk);
1399 oldsize -= chunk;
1400
1401 tr = current->journal_info;
1402 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1403 continue;
1404
1405 gfs2_trans_end(sdp);
1406 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1407 if (error)
1408 return error;
1409 }
1410
1411 return 0;
1412}
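
/*
 * Worked example (illustrative, assuming a 4KiB block size and page-aligned
 * sizes): max_chunk = GFS2_JTRUNC_REVOKES * s_blocksize = 8192 * 4096 bytes
 * = 32MiB, so truncating a jdata file by 100MiB is split into four
 * transactions of at most GFS2_JTRUNC_REVOKES revokes each
 * (32MiB + 32MiB + 32MiB + 4MiB).
 */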
1413
1414static int trunc_start(struct inode *inode, u64 newsize)
1415{
1416 struct gfs2_inode *ip = GFS2_I(inode);
1417 struct gfs2_sbd *sdp = GFS2_SB(inode);
1418 struct buffer_head *dibh = NULL;
1419 int journaled = gfs2_is_jdata(ip);
1420 u64 oldsize = inode->i_size;
1421 int error;
1422
1423 if (!gfs2_is_stuffed(ip)) {
1424 unsigned int blocksize = i_blocksize(inode);
1425 unsigned int offs = newsize & (blocksize - 1);
1426 if (offs) {
1427 error = gfs2_block_zero_range(inode, newsize,
1428 blocksize - offs);
1429 if (error)
1430 return error;
1431 }
1432 }
1433 if (journaled)
1434 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1435 else
1436 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1437 if (error)
1438 return error;
1439
1440 error = gfs2_meta_inode_buffer(ip, &dibh);
1441 if (error)
1442 goto out;
1443
1444 gfs2_trans_add_meta(ip->i_gl, dibh);
1445
1446 if (gfs2_is_stuffed(ip))
1447 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1448 else
1449 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1450
1451 i_size_write(inode, newsize);
1452 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1453 gfs2_dinode_out(ip, dibh->b_data);
1454
1455 if (journaled)
1456 error = gfs2_journaled_truncate(inode, oldsize, newsize);
1457 else
1458 truncate_pagecache(inode, newsize);
1459
1460out:
1461 brelse(dibh);
1462 if (current->journal_info)
1463 gfs2_trans_end(sdp);
1464 return error;
1465}
1466
1467int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1468 struct iomap *iomap)
1469{
1470 struct metapath mp = { .mp_aheight = 1, };
1471 int ret;
1472
1473 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1474 if (!ret && iomap->type == IOMAP_HOLE)
1475 ret = gfs2_iomap_alloc(inode, iomap, &mp);
1476 release_metapath(&mp);
1477 return ret;
1478}
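
/*
 * Minimal usage sketch (hypothetical caller, e.g. an internal file such as a
 * quota inode): map and, if necessary, allocate one block at byte offset pos:
 *
 *	struct iomap iomap = { };
 *	u64 block;
 *	int error;
 *
 *	error = gfs2_iomap_get_alloc(inode, pos, i_blocksize(inode), &iomap);
 *	if (!error)
 *		block = iomap.addr >> inode->i_blkbits;
 *
 * The caller is expected to hold the appropriate glocks and to have an open
 * transaction, since gfs2_iomap_alloc() adds buffers to the current
 * transaction.
 */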
1479
1480/**
1481 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1482 * @ip: inode
1483 * @rd_gh: holder of the resource group glock
1484 * @bh: buffer head to sweep
1485 * @start: starting point in bh
1486 * @end: end point in bh
1487 * @meta: true if bh points to metadata (rather than data)
1488 * @btotal: place to keep count of total blocks freed
1489 *
1490 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1491 * free, and free them all. However, we do it one rgrp at a time. If this
1492 * block has references to multiple rgrps, we break it into individual
1493 * transactions. This allows other processes to use the rgrps while we're
1494 * focused on a single one, for better concurrency / performance.
1495 * At every transaction boundary, we rewrite the inode into the journal.
1496 * That way the bitmaps are kept consistent with the inode and we can recover
1497 * if we're interrupted by power-outages.
1498 *
1499 * Returns: 0 on success, or an error code if an error occurred.
1500 *          *btotal is updated with the total number of blocks freed
1501 */
1502static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1503 struct buffer_head *bh, __be64 *start, __be64 *end,
1504 bool meta, u32 *btotal)
1505{
1506 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1507 struct gfs2_rgrpd *rgd;
1508 struct gfs2_trans *tr;
1509 __be64 *p;
1510 int blks_outside_rgrp;
1511 u64 bn, bstart, isize_blks;
1512 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1513 int ret = 0;
1514 bool buf_in_tr = false; /* buffer was added to transaction */
1515
1516more_rgrps:
1517 rgd = NULL;
1518 if (gfs2_holder_initialized(rd_gh)) {
1519 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1520 gfs2_assert_withdraw(sdp,
1521 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1522 }
1523 blks_outside_rgrp = 0;
1524 bstart = 0;
1525 blen = 0;
1526
1527 for (p = start; p < end; p++) {
1528 if (!*p)
1529 continue;
1530 bn = be64_to_cpu(*p);
1531
1532 if (rgd) {
1533 if (!rgrp_contains_block(rgd, bn)) {
1534 blks_outside_rgrp++;
1535 continue;
1536 }
1537 } else {
1538 rgd = gfs2_blk2rgrpd(sdp, bn, true);
1539 if (unlikely(!rgd)) {
1540 ret = -EIO;
1541 goto out;
1542 }
1543 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1544 0, rd_gh);
1545 if (ret)
1546 goto out;
1547
1548 /* Must be done with the rgrp glock held: */
1549 if (gfs2_rs_active(&ip->i_res) &&
1550 rgd == ip->i_res.rs_rbm.rgd)
1551 gfs2_rs_deltree(&ip->i_res);
1552 }
1553
1554 /* The size of our transactions will be unknown until we
1555 actually process all the metadata blocks that relate to
1556 the rgrp. So we estimate. We know it can't be more than
1557 the dinode's i_blocks and we don't want to exceed the
1558 journal flush threshold, sd_log_thresh2. */
1559 if (current->journal_info == NULL) {
1560 unsigned int jblocks_rqsted, revokes;
1561
1562 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1563 RES_INDIRECT;
1564 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1565 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1566 jblocks_rqsted +=
1567 atomic_read(&sdp->sd_log_thresh2);
1568 else
1569 jblocks_rqsted += isize_blks;
1570 revokes = jblocks_rqsted;
1571 if (meta)
1572 revokes += end - start;
1573 else if (ip->i_depth)
1574 revokes += sdp->sd_inptrs;
1575 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1576 if (ret)
1577 goto out_unlock;
1578 down_write(&ip->i_rw_mutex);
1579 }
1580 /* check if we will exceed the transaction blocks requested */
1581 tr = current->journal_info;
1582 if (tr->tr_num_buf_new + RES_STATFS +
1583 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1584 /* We set blks_outside_rgrp to ensure the loop will
1585 be repeated for the same rgrp, but with a new
1586 transaction. */
1587 blks_outside_rgrp++;
1588 /* This next part is tricky. If the buffer was added
1589 to the transaction, we've already set some block
1590 pointers to 0, so we better follow through and free
1591 them, or we will introduce corruption (so break).
1592 This may be impossible, or at least rare, but I
1593 decided to cover the case regardless.
1594
1595 If the buffer was not added to the transaction
1596 (this call), doing so would exceed our transaction
1597 size, so we need to end the transaction and start a
1598 new one (so goto). */
1599
1600 if (buf_in_tr)
1601 break;
1602 goto out_unlock;
1603 }
1604
1605 gfs2_trans_add_meta(ip->i_gl, bh);
1606 buf_in_tr = true;
1607 *p = 0;
1608 if (bstart + blen == bn) {
1609 blen++;
1610 continue;
1611 }
1612 if (bstart) {
1613 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1614 (*btotal) += blen;
1615 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1616 }
1617 bstart = bn;
1618 blen = 1;
1619 }
1620 if (bstart) {
1621 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1622 (*btotal) += blen;
1623 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1624 }
1625out_unlock:
1626 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1627 outside the rgrp we just processed,
1628 do it all over again. */
1629 if (current->journal_info) {
1630 struct buffer_head *dibh;
1631
1632 ret = gfs2_meta_inode_buffer(ip, &dibh);
1633 if (ret)
1634 goto out;
1635
1636 /* Every transaction boundary, we rewrite the dinode
1637 to keep its di_blocks current in case of failure. */
1638 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1639 current_time(&ip->i_inode);
1640 gfs2_trans_add_meta(ip->i_gl, dibh);
1641 gfs2_dinode_out(ip, dibh->b_data);
1642 brelse(dibh);
1643 up_write(&ip->i_rw_mutex);
1644 gfs2_trans_end(sdp);
1645 buf_in_tr = false;
1646 }
1647 gfs2_glock_dq_uninit(rd_gh);
1648 cond_resched();
1649 goto more_rgrps;
1650 }
1651out:
1652 return ret;
1653}
1654
1655static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1656{
1657 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1658 return false;
1659 return true;
1660}
1661
1662/**
1663 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1664 * @mp: starting metapath
1665 * @h: desired height to search
1666 *
1667 * Assumes the metapath is valid (with buffers) out to height h.
1668 * Returns: true if a non-null pointer was found in the metapath buffer
1669 * false if all remaining pointers are NULL in the buffer
1670 */
1671static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1672 unsigned int h,
1673 __u16 *end_list, unsigned int end_aligned)
1674{
1675 struct buffer_head *bh = mp->mp_bh[h];
1676 __be64 *first, *ptr, *end;
1677
1678 first = metaptr1(h, mp);
1679 ptr = first + mp->mp_list[h];
1680 end = (__be64 *)(bh->b_data + bh->b_size);
1681 if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1682 bool keep_end = h < end_aligned;
1683 end = first + end_list[h] + keep_end;
1684 }
1685
1686 while (ptr < end) {
1687 if (*ptr) { /* if we have a non-null pointer */
1688 mp->mp_list[h] = ptr - first;
1689 h++;
1690 if (h < GFS2_MAX_META_HEIGHT)
1691 mp->mp_list[h] = 0;
1692 return true;
1693 }
1694 ptr++;
1695 }
1696 return false;
1697}
1698
1699enum dealloc_states {
1700 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1701 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1702 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1703 DEALLOC_DONE = 3, /* process complete */
1704};
1705
1706static inline void
1707metapointer_range(struct metapath *mp, int height,
1708 __u16 *start_list, unsigned int start_aligned,
1709 __u16 *end_list, unsigned int end_aligned,
1710 __be64 **start, __be64 **end)
1711{
1712 struct buffer_head *bh = mp->mp_bh[height];
1713 __be64 *first;
1714
1715 first = metaptr1(height, mp);
1716 *start = first;
1717 if (mp_eq_to_hgt(mp, start_list, height)) {
1718 bool keep_start = height < start_aligned;
1719 *start = first + start_list[height] + keep_start;
1720 }
1721 *end = (__be64 *)(bh->b_data + bh->b_size);
1722 if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1723 bool keep_end = height < end_aligned;
1724 *end = first + end_list[height] + keep_end;
1725 }
1726}
1727
1728static inline bool walk_done(struct gfs2_sbd *sdp,
1729 struct metapath *mp, int height,
1730 __u16 *end_list, unsigned int end_aligned)
1731{
1732 __u16 end;
1733
1734 if (end_list) {
1735 bool keep_end = height < end_aligned;
1736 if (!mp_eq_to_hgt(mp, end_list, height))
1737 return false;
1738 end = end_list[height] + keep_end;
1739 } else
1740 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1741 return mp->mp_list[height] >= end;
1742}
1743
1744/**
1745 * punch_hole - deallocate blocks in a file
1746 * @ip: inode to truncate
1747 * @offset: the start of the hole
1748 * @length: the size of the hole (or 0 for truncate)
1749 *
1750 * Punch a hole into a file or truncate a file at a given position. This
1751 * function operates in whole blocks (@offset and @length are rounded
1752 * accordingly); partially filled boundary blocks must be zeroed separately
1752 * (e.g. via gfs2_block_zero_range).
1753 *
1754 * This function works from the bottom up, and from the right to the left. In
1755 * other words, it strips off the highest layer (data) before stripping any of
1756 * the metadata. Doing it this way is best in case the operation is interrupted
1757 * by power failure, etc. The dinode is rewritten in every transaction to
1758 * guarantee integrity.
1759 */
1760static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1761{
1762 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1763 u64 maxsize = sdp->sd_heightsize[ip->i_height];
1764 struct metapath mp = {};
1765 struct buffer_head *dibh, *bh;
1766 struct gfs2_holder rd_gh;
1767 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1768 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1769 __u16 start_list[GFS2_MAX_META_HEIGHT];
1770 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1771 unsigned int start_aligned, end_aligned;
1772 unsigned int strip_h = ip->i_height - 1;
1773 u32 btotal = 0;
1774 int ret, state;
1775 int mp_h; /* metapath buffers are read in to this height */
1776 u64 prev_bnr = 0;
1777 __be64 *start, *end;
1778
1779 if (offset >= maxsize) {
1780 /*
1781 * The starting point lies beyond the allocated metadata;
1782 * there are no blocks to deallocate.
1783 */
1784 return 0;
1785 }
1786
1787 /*
1788 * The start position of the hole is defined by lblock, start_list, and
1789 * start_aligned. The end position of the hole is defined by lend,
1790 * end_list, and end_aligned.
1791 *
1792 * start_aligned and end_aligned define down to which height the start
1793 * and end positions are aligned to the metadata tree (i.e., the
1794 * position is a multiple of the metadata granularity at the height
1795 * above). This determines at which heights additional meta pointers
1796 * need to be preserved for the remaining data.
1797 */
1798
1799 if (length) {
1800 u64 end_offset = offset + length;
1801 u64 lend;
1802
1803 /*
1804 * Clip the end at the maximum file size for the given height:
1805 * that's how far the metadata goes; files bigger than that
1806 * will have additional layers of indirection.
1807 */
1808 if (end_offset > maxsize)
1809 end_offset = maxsize;
1810 lend = end_offset >> bsize_shift;
1811
1812 if (lblock >= lend)
1813 return 0;
1814
1815 find_metapath(sdp, lend, &mp, ip->i_height);
1816 end_list = __end_list;
1817 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1818
1819 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1820 if (end_list[mp_h])
1821 break;
1822 }
1823 end_aligned = mp_h;
1824 }
1825
1826 find_metapath(sdp, lblock, &mp, ip->i_height);
1827 memcpy(start_list, mp.mp_list, sizeof(start_list));
1828
1829 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1830 if (start_list[mp_h])
1831 break;
1832 }
1833 start_aligned = mp_h;
1834
1835 ret = gfs2_meta_inode_buffer(ip, &dibh);
1836 if (ret)
1837 return ret;
1838
1839 mp.mp_bh[0] = dibh;
1840 ret = lookup_metapath(ip, &mp);
1841 if (ret)
1842 goto out_metapath;
1843
1844 /* issue read-ahead on metadata */
1845 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1846 metapointer_range(&mp, mp_h, start_list, start_aligned,
1847 end_list, end_aligned, &start, &end);
1848 gfs2_metapath_ra(ip->i_gl, start, end);
1849 }
1850
1851 if (mp.mp_aheight == ip->i_height)
1852 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1853 else
1854 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1855
1856 ret = gfs2_rindex_update(sdp);
1857 if (ret)
1858 goto out_metapath;
1859
1860 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1861 if (ret)
1862 goto out_metapath;
1863 gfs2_holder_mark_uninitialized(&rd_gh);
1864
1865 mp_h = strip_h;
1866
1867 while (state != DEALLOC_DONE) {
1868 switch (state) {
1869 /* Truncate a full metapath at the given strip height.
1870 * Note that strip_h == mp_h in order to be in this state. */
1871 case DEALLOC_MP_FULL:
1872 bh = mp.mp_bh[mp_h];
1873 gfs2_assert_withdraw(sdp, bh);
1874 if (gfs2_assert_withdraw(sdp,
1875 prev_bnr != bh->b_blocknr)) {
1876 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1877 "s_h:%u, mp_h:%u\n",
1878 (unsigned long long)ip->i_no_addr,
1879 prev_bnr, ip->i_height, strip_h, mp_h);
1880 }
1881 prev_bnr = bh->b_blocknr;
1882
1883 if (gfs2_metatype_check(sdp, bh,
1884 (mp_h ? GFS2_METATYPE_IN :
1885 GFS2_METATYPE_DI))) {
1886 ret = -EIO;
1887 goto out;
1888 }
1889
1890 /*
1891 * Below, passing end_aligned as 0 gives us the
1892 * metapointer range excluding the end point: the end
1893 * point is the first metapath we must not deallocate!
1894 */
1895
1896 metapointer_range(&mp, mp_h, start_list, start_aligned,
1897 end_list, 0 /* end_aligned */,
1898 &start, &end);
1899 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1900 start, end,
1901 mp_h != ip->i_height - 1,
1902 &btotal);
1903
1904 /* If we hit an error or have just swept the dinode
1905 buffer, exit. */
1906 if (ret || !mp_h) {
1907 state = DEALLOC_DONE;
1908 break;
1909 }
1910 state = DEALLOC_MP_LOWER;
1911 break;
1912
1913 /* lower the metapath strip height */
1914 case DEALLOC_MP_LOWER:
1915 /* We're done with the current buffer, so release it,
1916 unless it's the dinode buffer. Then back up to the
1917 previous pointer. */
1918 if (mp_h) {
1919 brelse(mp.mp_bh[mp_h]);
1920 mp.mp_bh[mp_h] = NULL;
1921 }
1922 /* If we can't get any lower in height, we've stripped
1923 off all we can. Next step is to back up and start
1924 stripping the previous level of metadata. */
1925 if (mp_h == 0) {
1926 strip_h--;
1927 memcpy(mp.mp_list, start_list, sizeof(start_list));
1928 mp_h = strip_h;
1929 state = DEALLOC_FILL_MP;
1930 break;
1931 }
1932 mp.mp_list[mp_h] = 0;
1933 mp_h--; /* search one metadata height down */
1934 mp.mp_list[mp_h]++;
1935 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1936 break;
1937 /* Here we've found a part of the metapath that is not
1938 * allocated. We need to search at that height for the
1939 * next non-null pointer. */
1940 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1941 state = DEALLOC_FILL_MP;
1942 mp_h++;
1943 }
1944 /* Otherwise, no more non-null pointers at this height;
1945 loop around in DEALLOC_MP_LOWER to back up and try again. */
1946 break;
1947
1948 /* Fill the metapath with buffers to the given height. */
1949 case DEALLOC_FILL_MP:
1950 /* Fill the buffers out to the current height. */
1951 ret = fillup_metapath(ip, &mp, mp_h);
1952 if (ret < 0)
1953 goto out;
1954
1955 /* On the first pass, issue read-ahead on metadata. */
1956 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1957 unsigned int height = mp.mp_aheight - 1;
1958
1959 /* No read-ahead for data blocks. */
1960 if (mp.mp_aheight - 1 == strip_h)
1961 height--;
1962
1963 for (; height >= mp.mp_aheight - ret; height--) {
1964 metapointer_range(&mp, height,
1965 start_list, start_aligned,
1966 end_list, end_aligned,
1967 &start, &end);
1968 gfs2_metapath_ra(ip->i_gl, start, end);
1969 }
1970 }
1971
1972 /* If buffers found for the entire strip height */
1973 if (mp.mp_aheight - 1 == strip_h) {
1974 state = DEALLOC_MP_FULL;
1975 break;
1976 }
1977 if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1978 mp_h = mp.mp_aheight - 1;
1979
1980 /* If we find a non-null block pointer, crawl a bit
1981 higher up in the metapath and try again, otherwise
1982 we need to look lower for a new starting point. */
1983 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1984 mp_h++;
1985 else
1986 state = DEALLOC_MP_LOWER;
1987 break;
1988 }
1989 }
1990
1991 if (btotal) {
1992 if (current->journal_info == NULL) {
1993 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1994 RES_QUOTA, 0);
1995 if (ret)
1996 goto out;
1997 down_write(&ip->i_rw_mutex);
1998 }
1999 gfs2_statfs_change(sdp, 0, +btotal, 0);
2000 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
2001 ip->i_inode.i_gid);
2002 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2003 gfs2_trans_add_meta(ip->i_gl, dibh);
2004 gfs2_dinode_out(ip, dibh->b_data);
2005 up_write(&ip->i_rw_mutex);
2006 gfs2_trans_end(sdp);
2007 }
2008
2009out:
2010 if (gfs2_holder_initialized(&rd_gh))
2011 gfs2_glock_dq_uninit(&rd_gh);
2012 if (current->journal_info) {
2013 up_write(&ip->i_rw_mutex);
2014 gfs2_trans_end(sdp);
2015 cond_resched();
2016 }
2017 gfs2_quota_unhold(ip);
2018out_metapath:
2019 release_metapath(&mp);
2020 return ret;
2021}
2022
2023static int trunc_end(struct gfs2_inode *ip)
2024{
2025 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2026 struct buffer_head *dibh;
2027 int error;
2028
2029 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2030 if (error)
2031 return error;
2032
2033 down_write(&ip->i_rw_mutex);
2034
2035 error = gfs2_meta_inode_buffer(ip, &dibh);
2036 if (error)
2037 goto out;
2038
2039 if (!i_size_read(&ip->i_inode)) {
2040 ip->i_height = 0;
2041 ip->i_goal = ip->i_no_addr;
2042 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2043 gfs2_ordered_del_inode(ip);
2044 }
2045 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2046 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2047
2048 gfs2_trans_add_meta(ip->i_gl, dibh);
2049 gfs2_dinode_out(ip, dibh->b_data);
2050 brelse(dibh);
2051
2052out:
2053 up_write(&ip->i_rw_mutex);
2054 gfs2_trans_end(sdp);
2055 return error;
2056}
2057
2058/**
2059 * do_shrink - make a file smaller
2060 * @inode: the inode
2061 * @newsize: the size to make the file
2062 *
2063 * Called with an exclusive lock on @inode. @newsize must be
2064 * equal to or smaller than the current inode size.
2065 *
2066 * Returns: errno
2067 */
2068
2069static int do_shrink(struct inode *inode, u64 newsize)
2070{
2071 struct gfs2_inode *ip = GFS2_I(inode);
2072 int error;
2073
2074 error = trunc_start(inode, newsize);
2075 if (error < 0)
2076 return error;
2077 if (gfs2_is_stuffed(ip))
2078 return 0;
2079
2080 error = punch_hole(ip, newsize, 0);
2081 if (error == 0)
2082 error = trunc_end(ip);
2083
2084 return error;
2085}
2086
2087void gfs2_trim_blocks(struct inode *inode)
2088{
2089 int ret;
2090
2091 ret = do_shrink(inode, inode->i_size);
2092 WARN_ON(ret != 0);
2093}
2094
2095/**
2096 * do_grow - Touch and update inode size
2097 * @inode: The inode
2098 * @size: The new size
2099 *
2100 * This function updates the timestamps on the inode and
2101 * may also increase the size of the inode. This function
2102 * must not be called with @size any smaller than the current
2103 * inode size.
2104 *
2105 * Although it is not strictly required to unstuff files here,
2106 * earlier versions of GFS2 have a bug in the stuffed file reading
2107 * code which will result in a buffer overrun if the size is larger
2108 * than the max stuffed file size. In order to prevent this from
2109 * occurring, such files are unstuffed, but in other cases we can
2110 * just update the inode size directly.
2111 *
2112 * Returns: 0 on success, or -ve on error
2113 */
2114
2115static int do_grow(struct inode *inode, u64 size)
2116{
2117 struct gfs2_inode *ip = GFS2_I(inode);
2118 struct gfs2_sbd *sdp = GFS2_SB(inode);
2119 struct gfs2_alloc_parms ap = { .target = 1, };
2120 struct buffer_head *dibh;
2121 int error;
2122 int unstuff = 0;
2123
2124 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2125 error = gfs2_quota_lock_check(ip, &ap);
2126 if (error)
2127 return error;
2128
2129 error = gfs2_inplace_reserve(ip, &ap);
2130 if (error)
2131 goto do_grow_qunlock;
2132 unstuff = 1;
2133 }
2134
2135 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2136 (unstuff &&
2137 gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2138 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2139 0 : RES_QUOTA), 0);
2140 if (error)
2141 goto do_grow_release;
2142
2143 if (unstuff) {
2144 error = gfs2_unstuff_dinode(ip, NULL);
2145 if (error)
2146 goto do_end_trans;
2147 }
2148
2149 error = gfs2_meta_inode_buffer(ip, &dibh);
2150 if (error)
2151 goto do_end_trans;
2152
2153 truncate_setsize(inode, size);
2154 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2155 gfs2_trans_add_meta(ip->i_gl, dibh);
2156 gfs2_dinode_out(ip, dibh->b_data);
2157 brelse(dibh);
2158
2159do_end_trans:
2160 gfs2_trans_end(sdp);
2161do_grow_release:
2162 if (unstuff) {
2163 gfs2_inplace_release(ip);
2164do_grow_qunlock:
2165 gfs2_quota_unlock(ip);
2166 }
2167 return error;
2168}
2169
2170/**
2171 * gfs2_setattr_size - make a file a given size
2172 * @inode: the inode
2173 * @newsize: the size to make the file
2174 *
2175 * The file size can grow, shrink, or stay the same size. This
2176 * is called holding i_rwsem and an exclusive glock on the inode
2177 * in question.
2178 *
2179 * Returns: errno
2180 */
2181
2182int gfs2_setattr_size(struct inode *inode, u64 newsize)
2183{
2184 struct gfs2_inode *ip = GFS2_I(inode);
2185 int ret;
2186
2187 BUG_ON(!S_ISREG(inode->i_mode));
2188
2189 ret = inode_newsize_ok(inode, newsize);
2190 if (ret)
2191 return ret;
2192
2193 inode_dio_wait(inode);
2194
2195 ret = gfs2_qa_get(ip);
2196 if (ret)
2197 goto out;
2198
2199 if (newsize >= inode->i_size) {
2200 ret = do_grow(inode, newsize);
2201 goto out;
2202 }
2203
2204 ret = do_shrink(inode, newsize);
2205out:
2206 gfs2_rs_delete(ip, NULL);
2207 gfs2_qa_put(ip);
2208 return ret;
2209}
2210
2211int gfs2_truncatei_resume(struct gfs2_inode *ip)
2212{
2213 int error;
2214 error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2215 if (!error)
2216 error = trunc_end(ip);
2217 return error;
2218}
2219
2220int gfs2_file_dealloc(struct gfs2_inode *ip)
2221{
2222 return punch_hole(ip, 0, 0);
2223}
2224
2225/**
2226 * gfs2_free_journal_extents - Free cached journal bmap info
2227 * @jd: The journal
2228 *
2229 */
2230
2231void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2232{
2233 struct gfs2_journal_extent *jext;
2234
2235 while(!list_empty(&jd->extent_list)) {
2236 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2237 list_del(&jext->list);
2238 kfree(jext);
2239 }
2240}
2241
2242/**
2243 * gfs2_add_jextent - Add or merge a new extent to extent cache
2244 * @jd: The journal descriptor
2245 * @lblock: The logical block at start of new extent
2246 * @dblock: The physical block at start of new extent
2247 * @blocks: Size of extent in fs blocks
2248 *
2249 * Returns: 0 on success or -ENOMEM
2250 */
2251
2252static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2253{
2254 struct gfs2_journal_extent *jext;
2255
2256 if (!list_empty(&jd->extent_list)) {
2257 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2258 if ((jext->dblock + jext->blocks) == dblock) {
2259 jext->blocks += blocks;
2260 return 0;
2261 }
2262 }
2263
2264 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2265 if (jext == NULL)
2266 return -ENOMEM;
2267 jext->dblock = dblock;
2268 jext->lblock = lblock;
2269 jext->blocks = blocks;
2270 list_add_tail(&jext->list, &jd->extent_list);
2271 jd->nr_extents++;
2272 return 0;
2273}
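
/*
 * Illustrative example: successive calls with (lblock, dblock, blocks) of
 * (0, 5000, 8) and then (8, 5008, 4) merge into a single cached extent
 * { .lblock = 0, .dblock = 5000, .blocks = 12 }, because the new extent
 * begins exactly where the previous one ends on disk.  A second call with
 * dblock = 6000 would instead append a new extent to the list.
 */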
2274
2275/**
2276 * gfs2_map_journal_extents - Cache journal bmap info
2277 * @sdp: The super block
2278 * @jd: The journal to map
2279 *
2280 * Create a reusable "extent" mapping from all logical
2281 * blocks to all physical blocks for the given journal. This will save
2282 * us time when writing journal blocks. Most journals will have only one
2283 * extent that maps all their logical blocks. That's because mkfs.gfs2
2284 * arranges the journal blocks sequentially to maximize performance.
2285 * So a single extent maps the whole journal, from its first logical
2286 * block to its last. However, gfs2_jadd can add journals while other
2287 * file activity is happening, so those journals may not be sequential.
2288 * Less likely is the case where users create their own journals by
2289 * mounting the metafs and laying them out themselves, but it is still
2290 * possible. Such journals might have several extents.
2291 *
2292 * Returns: 0 on success, or error on failure
2293 */
2294
2295int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2296{
2297 u64 lblock = 0;
2298 u64 lblock_stop;
2299 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2300 struct buffer_head bh;
2301 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2302 u64 size;
2303 int rc;
2304 ktime_t start, end;
2305
2306 start = ktime_get();
2307 lblock_stop = i_size_read(jd->jd_inode) >> shift;
2308 size = (lblock_stop - lblock) << shift;
2309 jd->nr_extents = 0;
2310 WARN_ON(!list_empty(&jd->extent_list));
2311
2312 do {
2313 bh.b_state = 0;
2314 bh.b_blocknr = 0;
2315 bh.b_size = size;
2316 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2317 if (rc || !buffer_mapped(&bh))
2318 goto fail;
2319 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2320 if (rc)
2321 goto fail;
2322 size -= bh.b_size;
2323 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2324 } while(size > 0);
2325
2326 end = ktime_get();
2327 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2328 jd->nr_extents, ktime_ms_delta(end, start));
2329 return 0;
2330
2331fail:
2332 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2333 rc, jd->jd_jid,
2334 (unsigned long long)(i_size_read(jd->jd_inode) - size),
2335 jd->nr_extents);
2336 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2337 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2338 bh.b_state, (unsigned long long)bh.b_size);
2339 gfs2_free_journal_extents(jd);
2340 return rc;
2341}
2342
2343/**
2344 * gfs2_write_alloc_required - figure out if a write will require an allocation
2345 * @ip: the file being written to
2346 * @offset: the offset to write to
2347 * @len: the number of bytes being written
2348 *
2349 * Returns: 1 if an alloc is required, 0 otherwise
2350 */
2351
2352int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2353 unsigned int len)
2354{
2355 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2356 struct buffer_head bh;
2357 unsigned int shift;
2358 u64 lblock, lblock_stop, size;
2359 u64 end_of_file;
2360
2361 if (!len)
2362 return 0;
2363
2364 if (gfs2_is_stuffed(ip)) {
2365 if (offset + len > gfs2_max_stuffed_size(ip))
2366 return 1;
2367 return 0;
2368 }
2369
2370 shift = sdp->sd_sb.sb_bsize_shift;
2371 BUG_ON(gfs2_is_dir(ip));
2372 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2373 lblock = offset >> shift;
2374 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2375 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2376 return 1;
2377
2378 size = (lblock_stop - lblock) << shift;
2379 do {
2380 bh.b_state = 0;
2381 bh.b_size = size;
2382 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2383 if (!buffer_mapped(&bh))
2384 return 1;
2385 size -= bh.b_size;
2386 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2387 } while(size > 0);
2388
2389 return 0;
2390}
2391
2392static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2393{
2394 struct gfs2_inode *ip = GFS2_I(inode);
2395 struct buffer_head *dibh;
2396 int error;
2397
2398 if (offset >= inode->i_size)
2399 return 0;
2400 if (offset + length > inode->i_size)
2401 length = inode->i_size - offset;
2402
2403 error = gfs2_meta_inode_buffer(ip, &dibh);
2404 if (error)
2405 return error;
2406 gfs2_trans_add_meta(ip->i_gl, dibh);
2407 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2408 length);
2409 brelse(dibh);
2410 return 0;
2411}
2412
2413static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2414 loff_t length)
2415{
2416 struct gfs2_sbd *sdp = GFS2_SB(inode);
2417 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2418 int error;
2419
2420 while (length) {
2421 struct gfs2_trans *tr;
2422 loff_t chunk;
2423 unsigned int offs;
2424
2425 chunk = length;
2426 if (chunk > max_chunk)
2427 chunk = max_chunk;
2428
2429 offs = offset & ~PAGE_MASK;
2430 if (offs && chunk > PAGE_SIZE)
2431 chunk = offs + ((chunk - offs) & PAGE_MASK);
2432
2433 truncate_pagecache_range(inode, offset, chunk);
2434 offset += chunk;
2435 length -= chunk;
2436
2437 tr = current->journal_info;
2438 if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2439 continue;
2440
2441 gfs2_trans_end(sdp);
2442 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2443 if (error)
2444 return error;
2445 }
2446 return 0;
2447}
2448
2449int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2450{
2451 struct inode *inode = file_inode(file);
2452 struct gfs2_inode *ip = GFS2_I(inode);
2453 struct gfs2_sbd *sdp = GFS2_SB(inode);
2454 unsigned int blocksize = i_blocksize(inode);
2455 loff_t start, end;
2456 int error;
2457
2458 if (!gfs2_is_stuffed(ip)) {
2459 unsigned int start_off, end_len;
2460
2461 start_off = offset & (blocksize - 1);
2462 end_len = (offset + length) & (blocksize - 1);
2463 if (start_off) {
2464 unsigned int len = length;
2465 if (length > blocksize - start_off)
2466 len = blocksize - start_off;
2467 error = gfs2_block_zero_range(inode, offset, len);
2468 if (error)
2469 goto out;
2470 if (start_off + length < blocksize)
2471 end_len = 0;
2472 }
2473 if (end_len) {
2474 error = gfs2_block_zero_range(inode,
2475 offset + length - end_len, end_len);
2476 if (error)
2477 goto out;
2478 }
2479 }
2480
2481 start = round_down(offset, blocksize);
2482 end = round_up(offset + length, blocksize) - 1;
2483 error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2484 if (error)
2485 return error;
2486
2487 if (gfs2_is_jdata(ip))
2488 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2489 GFS2_JTRUNC_REVOKES);
2490 else
2491 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2492 if (error)
2493 return error;
2494
2495 if (gfs2_is_stuffed(ip)) {
2496 error = stuffed_zero_range(inode, offset, length);
2497 if (error)
2498 goto out;
2499 }
2500
2501 if (gfs2_is_jdata(ip)) {
2502 BUG_ON(!current->journal_info);
2503 gfs2_journaled_truncate_range(inode, offset, length);
2504 } else
2505 truncate_pagecache_range(inode, offset, offset + length - 1);
2506
2507 file_update_time(file);
2508 mark_inode_dirty(inode);
2509
2510 if (current->journal_info)
2511 gfs2_trans_end(sdp);
2512
2513 if (!gfs2_is_stuffed(ip))
2514 error = punch_hole(ip, offset, length);
2515
2516out:
2517 if (current->journal_info)
2518 gfs2_trans_end(sdp);
2519 return error;
2520}