/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

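/*
 * gfs2_aspace_writepage - write back one page of metadata buffers
 *
 * Locks each dirty, mapped buffer on the page, marks it for async write and
 * submits it with REQ_META | REQ_PRIO.  For non-blocking (WB_SYNC_NONE)
 * writeback, the page is redirtied instead of blocking on a busy buffer.
 */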
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	put_page(page);

	return bh;
}

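/*
 * meta_prep_new - prepare a newly allocated metadata buffer
 *
 * Marks the buffer clean and uptodate and stamps the GFS2 magic number
 * into its metadata header.
 */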
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

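/*
 * gfs2_meta_read_endio - completion handler for metadata read bios
 *
 * For every segment in the bio, walks the buffer heads it covers and calls
 * each buffer's b_end_io() with the overall I/O status.
 */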
static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_error);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
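/*
 * If bio_add_page() cannot take another buffer (the bio is full for the
 * underlying device), the partially built bio is submitted and the outer
 * loop allocates a fresh bio for the remaining buffers.
 */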
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio->bi_bdev = bh->b_bdev;
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

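	/*
	 * Optionally read ahead the block that follows; being consecutive,
	 * it can usually share a single bio with the main read in
	 * gfs2_submit_bhs().
	 */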
	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

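/*
 * gfs2_remove_from_journal - drop a buffer's journal state
 *
 * Unpins the buffer if it is pinned in the log, adds a revoke if it still
 * belongs to an earlier transaction, and clears its dirty and uptodate
 * flags so the stale metadata is never written back.
 */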
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		tr->tr_touched = 1;
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty or pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, REMOVE_META);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	}
	*bhp = bh;
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ, REQ_RAHEAD | REQ_META, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
