/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_op = REQ_META | REQ_PRIO |
		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page. Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}

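/*
 * Editor's note: a worked example of the index arithmetic above
 * (illustrative, not part of the original file). With 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12) and a 1 KiB filesystem block size
 * (sb_bsize_shift == 10), shift == 2 and each page caches four
 * blocks. Block number 11 is therefore found at page index
 * 11 >> 2 == 2, in buffer slot 11 - (2 << 2) == 3 of that page.
 * The returned bh holds a reference; callers drop it with brelse().
 */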
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags (DIO_WAIT waits for the read to complete)
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *bh;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
	if (!(flags & DIO_WAIT))
		return 0;

	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		brelse(bh);
		return -EIO;
	}

	return 0;
}

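/*
 * Example caller (editor's sketch, not part of the original file):
 * read a single metadata block synchronously and release it:
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
 *	if (error)
 *		return error;
 *	... examine bh->b_data ...
 *	brelse(bh);
 *
 * Without DIO_WAIT the read is only submitted; the caller must then
 * wait with gfs2_meta_wait() before touching the data.
 */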
/**
 * gfs2_meta_wait - Wait for a previously submitted block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
			 int meta)
{
	struct gfs2_bufdata *bd;

	if (meta)
		lock_page(bh->b_page);

	if (bh->b_private) {
		if (meta)
			unlock_page(bh->b_page);
		return;
	}

	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
	bd->bd_bh = bh;
	bd->bd_gl = gl;

	INIT_LIST_HEAD(&bd->bd_list_tr);
	if (meta)
		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
	else
		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
	bh->b_private = bd;

	if (meta)
		unlock_page(bh->b_page);
}

void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_le.le_list);
		if (meta) {
			gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
			sdp->sd_log_num_buf--;
			tr->tr_num_buf_rm++;
		} else {
			gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
			sdp->sd_log_num_databuf--;
			tr->tr_num_databuf_rm++;
		}
		tr->tr_touched = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_ail) {
			gfs2_remove_from_ail(bd);
			bh->b_private = NULL;
			bd->bd_bh = NULL;
			bd->bd_blkno = bh->b_blocknr;
			gfs2_trans_add_revoke(sdp, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, current->journal_info, 1);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      int new, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;

	if (new) {
		BUG_ON(height == 0);
		bh = gfs2_meta_new(gl, num);
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else {
		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
		ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
		if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
			brelse(bh);
			ret = -EIO;
		}
	}
	*bhp = bh;
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(READA, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}

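/*
 * Example use of gfs2_meta_ra() (editor's sketch, assuming a caller
 * that already knows the extent from the inode's metadata):
 *
 *	struct buffer_head *bh;
 *
 *	bh = gfs2_meta_ra(gl, dblock, extlen);
 *	if (!buffer_uptodate(bh)) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	... process bh->b_data; later blocks are now likely cached ...
 *	brelse(bh);
 *
 * The function waits for the first block itself, so on return bh is
 * either up to date or hit an I/O error; the readahead blocks are
 * only submitted and complete asynchronously.
 */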
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static void gfs2_aspace_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the folio. Note that this can
		 * potentially cause a busy-wait loop from the flusher thread
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			folio_redirty_for_writepage(wbc, folio);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The folio and its buffers are protected from truncation by
	 * the writeback flag, so we can drop the bh refcounts early.
	 */
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE | write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	folio_unlock(folio);

	if (nr_underway == 0)
		folio_end_writeback(folio);
}

static int gfs2_aspace_writepages(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		gfs2_aspace_write_folio(folio, wbc);

	return error;
}

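/*
 * Editor's note on the loop above (based on the writeback_iter()
 * contract in mm/page-writeback.c, stated here as an assumption):
 * called first with folio == NULL, the iterator starts a new pass and
 * resets *error; on each subsequent call it consumes the previous
 * folio's result and returns the next folio, or NULL when the pass is
 * done. Since gfs2_aspace_write_folio() only submits asynchronous
 * buffer I/O and does not itself fail, the plain while loop is the
 * complete writepages implementation.
 */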
const struct address_space_operations gfs2_meta_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepages = gfs2_aspace_writepages,
	.release_folio = gfs2_release_folio,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepages = gfs2_aspace_writepages,
	.release_folio = gfs2_release_folio,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct folio *folio;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				mapping_gfp_mask(mapping) | __GFP_NOFAIL);
		bh = folio_buffers(folio);
		if (!bh)
			bh = create_empty_buffers(folio,
				sdp->sd_sb.sb_bsize, 0);
	} else {
		folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED, 0);
		if (IS_ERR(folio))
			return NULL;
		bh = folio_buffers(folio);
	}

	if (!bh)
		goto out_unlock;

	bh = get_nth_bh(bh, bufnum);
	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
	folio_unlock(folio);
	folio_put(folio);

	return bh;
}

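/*
 * Editor's note (an assumption based on the FGP flag semantics): in
 * the create path, FGP_CREAT together with __GFP_NOFAIL means
 * __filemap_get_folio() cannot return an error, which is why only the
 * no-create path checks IS_ERR(folio). get_nth_bh() takes its own
 * reference on the chosen buffer, so the folio reference can be
 * dropped before returning.
 */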
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request. (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		submit_bio(bio);
	}
}

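/*
 * Worked example for the sector arithmetic above (illustrative): with
 * 4 KiB metadata blocks, bh->b_size >> 9 == 8 sectors per block, so
 * filesystem block 100 starts at 512-byte sector 800 on the device.
 * If bio_add_page() refuses a page, the outer loop simply starts a
 * fresh bio for the remaining buffers.
 */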
/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags (DIO_WAIT waits for the read to complete)
 * @rahead: if set, also read ahead the following block
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (gfs2_withdrawing_or_withdrawn(sdp) &&
	    !gfs2_withdraw_in_prog(sdp)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

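/*
 * Example caller (editor's sketch, not part of the original file):
 * read a block synchronously and pull in its neighbour
 * opportunistically:
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, 1, &bh);
 *	if (error)
 *		return error;
 *	... examine bh->b_data; block blkno + 1 may now be cached ...
 *	brelse(bh);
 *
 * Note that only the primary block is waited on; the read-ahead block
 * completes asynchronously via end_buffer_read_sync().
 */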
/**
 * gfs2_meta_wait - Wait for a previously submitted block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (gfs2_withdrawing_or_withdrawn(sdp) &&
	    !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (gfs2_withdrawing_or_withdrawn(sdp) &&
	    !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	return 0;
}

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers corresponding to deleted blocks from the journal. If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet. So we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct folio *folio;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift;  /* convert block to page */
	unsigned int bufnum = blkno - (index << shift);

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		return NULL;
	bh = folio_buffers(folio);
	if (bh)
		bh = get_nth_bh(bh, bufnum);
	folio_unlock(folio);
	folio_put(folio);
	return bh;
}

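/*
 * Editor's note: unlike gfs2_getbuf(), which looks in the glock's
 * metadata address space, gfs2_getjdatabuf() searches the inode's
 * regular page cache (i_mapping), because journaled data buffers live
 * there rather than in the metadata mapping.
 */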
/**
 * gfs2_journal_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	if (!ip->i_gl) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
		     struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

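/*
 * Example (editor's sketch, not part of the original file): reading
 * and type-checking an inode's dinode block, which also triggers the
 * inode read-ahead path above:
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_buffer(ip, GFS2_METATYPE_DI, ip->i_no_addr, &bh);
 *	if (error)
 *		return error;
 *	... parse the on-disk dinode in bh->b_data ...
 *	brelse(bh);
 */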
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	bh_read_nowait(first_bh, REQ_META | REQ_PRIO);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}