v4.6: fs/gfs2/meta_io.c
 
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/mm.h>
 16#include <linux/pagemap.h>
 17#include <linux/writeback.h>
 18#include <linux/swap.h>
 19#include <linux/delay.h>
 20#include <linux/bio.h>
 21#include <linux/gfs2_ondisk.h>
 22
 23#include "gfs2.h"
 24#include "incore.h"
 25#include "glock.h"
 26#include "glops.h"
 27#include "inode.h"
 28#include "log.h"
 29#include "lops.h"
 30#include "meta_io.h"
 31#include "rgrp.h"
 32#include "trans.h"
 33#include "util.h"
 34#include "trace_gfs2.h"
 35
 36static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 37{
 38	struct buffer_head *bh, *head;
 39	int nr_underway = 0;
 40	int write_op = REQ_META | REQ_PRIO |
 41		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 42
 43	BUG_ON(!PageLocked(page));
 44	BUG_ON(!page_has_buffers(page));
 45
 46	head = page_buffers(page);
 47	bh = head;
 48
 49	do {
 50		if (!buffer_mapped(bh))
 51			continue;
 52		/*
 53		 * If it's a fully non-blocking write attempt and we cannot
 54		 * lock the buffer then redirty the page.  Note that this can
 55		 * potentially cause a busy-wait loop from flusher thread and kswapd
 56		 * activity, but those code paths have their own higher-level
 57		 * throttling.
 58		 */
 59		if (wbc->sync_mode != WB_SYNC_NONE) {
 60			lock_buffer(bh);
 61		} else if (!trylock_buffer(bh)) {
 62			redirty_page_for_writepage(wbc, page);
 63			continue;
 64		}
 65		if (test_clear_buffer_dirty(bh)) {
 66			mark_buffer_async_write(bh);
 67		} else {
 68			unlock_buffer(bh);
 69		}
 70	} while ((bh = bh->b_this_page) != head);
 71
 72	/*
 73	 * The page and its buffers are protected by PageWriteback(), so we can
 74	 * drop the bh refcounts early.
 75	 */
 76	BUG_ON(PageWriteback(page));
 77	set_page_writeback(page);
 78
 79	do {
 80		struct buffer_head *next = bh->b_this_page;
 81		if (buffer_async_write(bh)) {
 82			submit_bh(write_op, bh);
 83			nr_underway++;
 84		}
 85		bh = next;
 86	} while (bh != head);
 87	unlock_page(page);
 88
 89	if (nr_underway == 0)
 90		end_page_writeback(page);
 91
 92	return 0;
 93}
 94
 95const struct address_space_operations gfs2_meta_aops = {
 96	.writepage = gfs2_aspace_writepage,
 97	.releasepage = gfs2_releasepage,
 98};
 99
100const struct address_space_operations gfs2_rgrp_aops = {
101	.writepage = gfs2_aspace_writepage,
102	.releasepage = gfs2_releasepage,
103};
104
105/**
106 * gfs2_getbuf - Get a buffer with a given address space
107 * @gl: the glock
108 * @blkno: the block number (filesystem scope)
109 * @create: 1 if the buffer should be created
110 *
111 * Returns: the buffer
112 */
113
114struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
115{
116	struct address_space *mapping = gfs2_glock2aspace(gl);
117	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
118	struct page *page;
119	struct buffer_head *bh;
120	unsigned int shift;
121	unsigned long index;
122	unsigned int bufnum;
123
124	if (mapping == NULL)
125		mapping = &sdp->sd_aspace;
126
127	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128	index = blkno >> shift;             /* convert block to page */
129	bufnum = blkno - (index << shift);  /* block buf index within page */
130
131	if (create) {
132		for (;;) {
133			page = grab_cache_page(mapping, index);
134			if (page)
135				break;
136			yield();
137		}
138	} else {
139		page = find_get_page_flags(mapping, index,
140						FGP_LOCK|FGP_ACCESSED);
141		if (!page)
142			return NULL;
143	}
144
145	if (!page_has_buffers(page))
146		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
147
148	/* Locate header for our buffer within our page */
149	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
150		/* Do nothing */;
151	get_bh(bh);
152
153	if (!buffer_mapped(bh))
154		map_bh(bh, sdp->sd_vfs, blkno);
155
156	unlock_page(page);
157	put_page(page);
158
159	return bh;
160}
161
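The index arithmetic in gfs2_getbuf() maps a filesystem block number to the page-cache page that holds it and to the buffer slot within that page. Below is a minimal standalone sketch of that calculation; the 4 KiB page size, 1 KiB block size and the sample block number are illustrative assumptions, not values taken from a real superblock.

/*
 * Standalone sketch of the blkno -> (page index, buffer index) mapping
 * used by gfs2_getbuf().  Assumes 4096-byte pages and 1024-byte blocks
 * (four blocks per page); these are illustrative values only.
 */
#include <stdio.h>

#define PAGE_SHIFT_DEMO   12U   /* 4096-byte pages */
#define BSIZE_SHIFT_DEMO  10U   /* 1024-byte filesystem blocks */

int main(void)
{
	unsigned long long blkno = 12345;               /* sample block number */
	unsigned int shift = PAGE_SHIFT_DEMO - BSIZE_SHIFT_DEMO;
	unsigned long index = blkno >> shift;           /* page that caches the block */
	unsigned int bufnum = blkno - (index << shift); /* buffer slot inside that page */

	/* With four blocks per page: 12345 / 4 = 3086, 12345 % 4 = 1 */
	printf("page index %lu, buffer %u within the page\n", index, bufnum);
	return 0;
}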
162static void meta_prep_new(struct buffer_head *bh)
163{
164	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
165
166	lock_buffer(bh);
167	clear_buffer_dirty(bh);
168	set_buffer_uptodate(bh);
169	unlock_buffer(bh);
170
171	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
172}
173
174/**
175 * gfs2_meta_new - Get a block
176 * @gl: The glock associated with this block
177 * @blkno: The block number
178 *
179 * Returns: The buffer
180 */
181
182struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
183{
184	struct buffer_head *bh;
185	bh = gfs2_getbuf(gl, blkno, CREATE);
186	meta_prep_new(bh);
187	return bh;
188}
189
190static void gfs2_meta_read_endio(struct bio *bio)
191{
192	struct bio_vec *bvec;
193	int i;
194
195	bio_for_each_segment_all(bvec, bio, i) {
196		struct page *page = bvec->bv_page;
197		struct buffer_head *bh = page_buffers(page);
198		unsigned int len = bvec->bv_len;
199
200		while (bh_offset(bh) < bvec->bv_offset)
201			bh = bh->b_this_page;
202		do {
203			struct buffer_head *next = bh->b_this_page;
204			len -= bh->b_size;
205			bh->b_end_io(bh, !bio->bi_error);
206			bh = next;
207		} while (bh && len);
208	}
209	bio_put(bio);
210}
211
212/*
213 * Submit several consecutive buffer head I/O requests as a single bio I/O
214 * request.  (See submit_bh_wbc.)
215 */
216static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
217{
218	struct buffer_head *bh = bhs[0];
219	struct bio *bio;
220	int i;
221
222	if (!num)
223		return;
224
225	bio = bio_alloc(GFP_NOIO, num);
226	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
227	bio->bi_bdev = bh->b_bdev;
228	for (i = 0; i < num; i++) {
229		bh = bhs[i];
230		bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
231	}
232	bio->bi_end_io = gfs2_meta_read_endio;
233	submit_bio(rw, bio);
234}
235
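gfs2_submit_bhs() positions the bio by converting the first buffer's block number into 512-byte sectors, then adds each buffer's page to the same bio so consecutive metadata blocks go out as one request. A standalone sketch of the sector conversion, with an assumed 4 KiB buffer size and an arbitrary block number:

/*
 * Sketch of the bi_sector computation in gfs2_submit_bhs():
 * sector = blocknr * (block size / 512).  The block size and block
 * number below are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long blocknr = 2048;   /* example buffer_head block number */
	unsigned int b_size = 4096;          /* example buffer size in bytes */
	unsigned long long sector = blocknr * (b_size >> 9);

	/* 4096 >> 9 == 8 sectors per block, so block 2048 starts at sector 16384 */
	printf("block %llu -> sector %llu\n", blocknr, sector);
	return 0;
}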
236/**
237 * gfs2_meta_read - Read a block from disk
238 * @gl: The glock covering the block
239 * @blkno: The block number
240 * @flags: flags
241 * @bhp: the place where the buffer is returned (NULL on failure)
242 *
243 * Returns: errno
244 */
245
246int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
247		   int rahead, struct buffer_head **bhp)
248{
249	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
250	struct buffer_head *bh, *bhs[2];
251	int num = 0;
252
253	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
254		*bhp = NULL;
255		return -EIO;
256	}
257
258	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
259
260	lock_buffer(bh);
261	if (buffer_uptodate(bh)) {
262		unlock_buffer(bh);
263		flags &= ~DIO_WAIT;
264	} else {
265		bh->b_end_io = end_buffer_read_sync;
266		get_bh(bh);
267		bhs[num++] = bh;
268	}
269
270	if (rahead) {
271		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
272
273		lock_buffer(bh);
274		if (buffer_uptodate(bh)) {
275			unlock_buffer(bh);
276			brelse(bh);
277		} else {
278			bh->b_end_io = end_buffer_read_sync;
279			bhs[num++] = bh;
280		}
281	}
282
283	gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
284	if (!(flags & DIO_WAIT))
285		return 0;
286
287	bh = *bhp;
288	wait_on_buffer(bh);
289	if (unlikely(!buffer_uptodate(bh))) {
290		struct gfs2_trans *tr = current->journal_info;
291		if (tr && tr->tr_touched)
292			gfs2_io_error_bh(sdp, bh);
293		brelse(bh);
294		*bhp = NULL;
295		return -EIO;
296	}
297
298	return 0;
299}
300
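A hedged sketch of how gfs2_meta_read() is typically called (compare gfs2_meta_indirect_buffer() further down): the wrapper name gfs2_read_example and the zero read-ahead argument are hypothetical, and the fragment is a kernel-context illustration rather than a standalone program.

/*
 * Hypothetical caller sketch (not from the kernel tree): read one metadata
 * block under a glock, wait for the I/O, then drop the reference.
 */
static int gfs2_read_example(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	int error;

	/* DIO_WAIT: block until the read completes; rahead = 0: no read-ahead */
	error = gfs2_meta_read(gl, blkno, DIO_WAIT, 0, &bh);
	if (error)
		return error;        /* *bhp is NULL on failure */

	/* ... inspect bh->b_data here ... */

	brelse(bh);                  /* drop the reference taken by gfs2_getbuf() */
	return 0;
}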
301/**
302 * gfs2_meta_wait - Reread a block from disk
303 * @sdp: the filesystem
304 * @bh: The block to wait for
305 *
306 * Returns: errno
307 */
308
309int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
310{
311	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
312		return -EIO;
313
314	wait_on_buffer(bh);
315
316	if (!buffer_uptodate(bh)) {
317		struct gfs2_trans *tr = current->journal_info;
318		if (tr && tr->tr_touched)
319			gfs2_io_error_bh(sdp, bh);
320		return -EIO;
321	}
322	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
323		return -EIO;
324
325	return 0;
326}
327
328void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
329{
330	struct address_space *mapping = bh->b_page->mapping;
331	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
332	struct gfs2_bufdata *bd = bh->b_private;
333	int was_pinned = 0;
334
335	if (test_clear_buffer_pinned(bh)) {
336		trace_gfs2_pin(bd, 0);
337		atomic_dec(&sdp->sd_log_pinned);
338		list_del_init(&bd->bd_list);
339		if (meta)
340			tr->tr_num_buf_rm++;
341		else
342			tr->tr_num_databuf_rm++;
343		tr->tr_touched = 1;
344		was_pinned = 1;
345		brelse(bh);
346	}
347	if (bd) {
348		spin_lock(&sdp->sd_ail_lock);
349		if (bd->bd_tr) {
350			gfs2_trans_add_revoke(sdp, bd);
351		} else if (was_pinned) {
352			bh->b_private = NULL;
353			kmem_cache_free(gfs2_bufdata_cachep, bd);
354		}
355		spin_unlock(&sdp->sd_ail_lock);
356	}
357	clear_buffer_dirty(bh);
358	clear_buffer_uptodate(bh);
359}
360
361/**
362 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
363 * @ip: the inode who owns the buffers
364 * @bstart: the first buffer in the run
365 * @blen: the number of buffers in the run
366 *
367 */
368
369void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
370{
371	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
372	struct buffer_head *bh;
373
374	while (blen) {
375		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
376		if (bh) {
377			lock_buffer(bh);
378			gfs2_log_lock(sdp);
379			gfs2_remove_from_journal(bh, current->journal_info, 1);
380			gfs2_log_unlock(sdp);
381			unlock_buffer(bh);
382			brelse(bh);
383		}
384
385		bstart++;
386		blen--;
387	}
388}
389
390/**
391 * gfs2_meta_indirect_buffer - Get a metadata buffer
392 * @ip: The GFS2 inode
393 * @height: The level of this buf in the metadata (indir addr) tree (if any)
394 * @num: The block number (device relative) of the buffer
395 * @bhp: the buffer is returned here
396 *
397 * Returns: errno
398 */
399
400int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
401			      struct buffer_head **bhp)
402{
403	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
404	struct gfs2_glock *gl = ip->i_gl;
405	struct buffer_head *bh;
406	int ret = 0;
407	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
408	int rahead = 0;
409
410	if (num == ip->i_no_addr)
411		rahead = ip->i_rahead;
412
413	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
414	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
415		brelse(bh);
416		ret = -EIO;
417	}
418	*bhp = bh;
419	return ret;
420}
421
422/**
423 * gfs2_meta_ra - start readahead on an extent of a file
424 * @gl: the glock the blocks belong to
425 * @dblock: the starting disk block
426 * @extlen: the number of blocks in the extent
427 *
428 * returns: the first buffer in the extent
429 */
430
431struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
432{
433	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
434	struct buffer_head *first_bh, *bh;
435	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
436			  sdp->sd_sb.sb_bsize_shift;
437
438	BUG_ON(!extlen);
439
440	if (max_ra < 1)
441		max_ra = 1;
442	if (extlen > max_ra)
443		extlen = max_ra;
444
445	first_bh = gfs2_getbuf(gl, dblock, CREATE);
446
447	if (buffer_uptodate(first_bh))
448		goto out;
449	if (!buffer_locked(first_bh))
450		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
451
452	dblock++;
453	extlen--;
454
455	while (extlen) {
456		bh = gfs2_getbuf(gl, dblock, CREATE);
457
458		if (!buffer_uptodate(bh) && !buffer_locked(bh))
459			ll_rw_block(READA | REQ_META, 1, &bh);
460		brelse(bh);
461		dblock++;
462		extlen--;
463		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
464			goto out;
465	}
466
467	wait_on_buffer(first_bh);
468out:
469	return first_bh;
470}
471
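gfs2_meta_ra() caps the readahead window with the gt_max_readahead tunable, shifting out the block-size exponent to turn the byte-based limit into a block count and never letting it drop below one block. A standalone sketch of that clamp; the 256 KiB limit, 4 KiB block size and 100-block request are illustrative assumptions, not real tunable values.

/*
 * Sketch of the readahead clamp in gfs2_meta_ra().  The tunable and block
 * size below are assumptions for illustration, not values read from a
 * mounted filesystem.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_readahead_bytes = 256 * 1024;  /* assumed tunable value */
	unsigned int bsize_shift = 12;                  /* 4 KiB blocks */
	unsigned int extlen = 100;                      /* requested extent, in blocks */

	unsigned int max_ra = max_readahead_bytes >> bsize_shift;  /* 64 blocks */
	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	/* A 100-block request is trimmed to 64 blocks of readahead */
	printf("readahead limited to %u blocks\n", extlen);
	return 0;
}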
v5.4: fs/gfs2/meta_io.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  5 */
  6
  7#include <linux/sched.h>
  8#include <linux/slab.h>
  9#include <linux/spinlock.h>
 10#include <linux/completion.h>
 11#include <linux/buffer_head.h>
 12#include <linux/mm.h>
 13#include <linux/pagemap.h>
 14#include <linux/writeback.h>
 15#include <linux/swap.h>
 16#include <linux/delay.h>
 17#include <linux/bio.h>
 18#include <linux/gfs2_ondisk.h>
 19
 20#include "gfs2.h"
 21#include "incore.h"
 22#include "glock.h"
 23#include "glops.h"
 24#include "inode.h"
 25#include "log.h"
 26#include "lops.h"
 27#include "meta_io.h"
 28#include "rgrp.h"
 29#include "trans.h"
 30#include "util.h"
 31#include "trace_gfs2.h"
 32
 33static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 34{
 35	struct buffer_head *bh, *head;
 36	int nr_underway = 0;
 37	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
 38
 39	BUG_ON(!PageLocked(page));
 40	BUG_ON(!page_has_buffers(page));
 41
 42	head = page_buffers(page);
 43	bh = head;
 44
 45	do {
 46		if (!buffer_mapped(bh))
 47			continue;
 48		/*
 49		 * If it's a fully non-blocking write attempt and we cannot
 50		 * lock the buffer then redirty the page.  Note that this can
 51		 * potentially cause a busy-wait loop from flusher thread and kswapd
 52		 * activity, but those code paths have their own higher-level
 53		 * throttling.
 54		 */
 55		if (wbc->sync_mode != WB_SYNC_NONE) {
 56			lock_buffer(bh);
 57		} else if (!trylock_buffer(bh)) {
 58			redirty_page_for_writepage(wbc, page);
 59			continue;
 60		}
 61		if (test_clear_buffer_dirty(bh)) {
 62			mark_buffer_async_write(bh);
 63		} else {
 64			unlock_buffer(bh);
 65		}
 66	} while ((bh = bh->b_this_page) != head);
 67
 68	/*
 69	 * The page and its buffers are protected by PageWriteback(), so we can
 70	 * drop the bh refcounts early.
 71	 */
 72	BUG_ON(PageWriteback(page));
 73	set_page_writeback(page);
 74
 75	do {
 76		struct buffer_head *next = bh->b_this_page;
 77		if (buffer_async_write(bh)) {
 78			submit_bh(REQ_OP_WRITE, write_flags, bh);
 79			nr_underway++;
 80		}
 81		bh = next;
 82	} while (bh != head);
 83	unlock_page(page);
 84
 85	if (nr_underway == 0)
 86		end_page_writeback(page);
 87
 88	return 0;
 89}
 90
 91const struct address_space_operations gfs2_meta_aops = {
 92	.writepage = gfs2_aspace_writepage,
 93	.releasepage = gfs2_releasepage,
 94};
 95
 96const struct address_space_operations gfs2_rgrp_aops = {
 97	.writepage = gfs2_aspace_writepage,
 98	.releasepage = gfs2_releasepage,
 99};
100
101/**
102 * gfs2_getbuf - Get a buffer with a given address space
103 * @gl: the glock
104 * @blkno: the block number (filesystem scope)
105 * @create: 1 if the buffer should be created
106 *
107 * Returns: the buffer
108 */
109
110struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
111{
112	struct address_space *mapping = gfs2_glock2aspace(gl);
113	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
114	struct page *page;
115	struct buffer_head *bh;
116	unsigned int shift;
117	unsigned long index;
118	unsigned int bufnum;
119
120	if (mapping == NULL)
121		mapping = &sdp->sd_aspace;
122
123	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
124	index = blkno >> shift;             /* convert block to page */
125	bufnum = blkno - (index << shift);  /* block buf index within page */
126
127	if (create) {
128		for (;;) {
129			page = grab_cache_page(mapping, index);
130			if (page)
131				break;
132			yield();
133		}
134	} else {
135		page = find_get_page_flags(mapping, index,
136						FGP_LOCK|FGP_ACCESSED);
137		if (!page)
138			return NULL;
139	}
140
141	if (!page_has_buffers(page))
142		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
143
144	/* Locate header for our buffer within our page */
145	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
146		/* Do nothing */;
147	get_bh(bh);
148
149	if (!buffer_mapped(bh))
150		map_bh(bh, sdp->sd_vfs, blkno);
151
152	unlock_page(page);
153	put_page(page);
154
155	return bh;
156}
157
158static void meta_prep_new(struct buffer_head *bh)
159{
160	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
161
162	lock_buffer(bh);
163	clear_buffer_dirty(bh);
164	set_buffer_uptodate(bh);
165	unlock_buffer(bh);
166
167	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
168}
169
170/**
171 * gfs2_meta_new - Get a block
172 * @gl: The glock associated with this block
173 * @blkno: The block number
174 *
175 * Returns: The buffer
176 */
177
178struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
179{
180	struct buffer_head *bh;
181	bh = gfs2_getbuf(gl, blkno, CREATE);
182	meta_prep_new(bh);
183	return bh;
184}
185
186static void gfs2_meta_read_endio(struct bio *bio)
187{
188	struct bio_vec *bvec;
189	struct bvec_iter_all iter_all;
190
191	bio_for_each_segment_all(bvec, bio, iter_all) {
192		struct page *page = bvec->bv_page;
193		struct buffer_head *bh = page_buffers(page);
194		unsigned int len = bvec->bv_len;
195
196		while (bh_offset(bh) < bvec->bv_offset)
197			bh = bh->b_this_page;
198		do {
199			struct buffer_head *next = bh->b_this_page;
200			len -= bh->b_size;
201			bh->b_end_io(bh, !bio->bi_status);
202			bh = next;
203		} while (bh && len);
204	}
205	bio_put(bio);
206}
207
208/*
209 * Submit several consecutive buffer head I/O requests as a single bio I/O
210 * request.  (See submit_bh_wbc.)
211 */
212static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
213			    int num)
214{
215	while (num > 0) {
216		struct buffer_head *bh = *bhs;
217		struct bio *bio;
218
219		bio = bio_alloc(GFP_NOIO, num);
220		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
221		bio_set_dev(bio, bh->b_bdev);
222		while (num > 0) {
223			bh = *bhs;
224			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
225				BUG_ON(bio->bi_iter.bi_size == 0);
226				break;
227			}
228			bhs++;
229			num--;
230		}
231		bio->bi_end_io = gfs2_meta_read_endio;
232		bio_set_op_attrs(bio, op, op_flags);
233		submit_bio(bio);
234	}
235}
236
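Unlike the v4.6 version above, this gfs2_submit_bhs() takes a separate op and op_flags and starts a fresh bio whenever bio_add_page() refuses another page, so an oversized batch is simply split across several bios. A standalone model of that fill-until-full-then-resubmit loop; the batch capacity of three and the helper names are made up for illustration and do not correspond to real bio limits.

/*
 * Standalone model of the batching loop in the v5.4 gfs2_submit_bhs():
 * keep adding items to the current batch until it refuses one, then
 * submit it and start a new batch.  BATCH_CAPACITY is an arbitrary
 * stand-in for the point where bio_add_page() would fail.
 */
#include <stdio.h>

#define BATCH_CAPACITY 3

static int batch_used;

static int add_to_batch(int block)
{
	if (batch_used == BATCH_CAPACITY)
		return 0;            /* batch full: caller must submit and retry */
	batch_used++;
	printf("  added block %d\n", block);
	return 1;
}

static void submit_batch(void)
{
	printf("submit batch of %d\n", batch_used);
	batch_used = 0;
}

int main(void)
{
	int blocks[] = { 10, 11, 12, 13, 14 };
	int num = 5, i = 0;

	while (num > 0) {
		printf("new batch\n");
		while (num > 0) {
			if (!add_to_batch(blocks[i]))
				break;   /* mirrors the bio_add_page() failure path */
			i++;
			num--;
		}
		submit_batch();
	}
	return 0;
}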
237/**
238 * gfs2_meta_read - Read a block from disk
239 * @gl: The glock covering the block
240 * @blkno: The block number
241 * @flags: flags
242 * @bhp: the place where the buffer is returned (NULL on failure)
243 *
244 * Returns: errno
245 */
246
247int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
248		   int rahead, struct buffer_head **bhp)
249{
250	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
251	struct buffer_head *bh, *bhs[2];
252	int num = 0;
253
254	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
255		*bhp = NULL;
256		return -EIO;
257	}
258
259	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
260
261	lock_buffer(bh);
262	if (buffer_uptodate(bh)) {
263		unlock_buffer(bh);
264		flags &= ~DIO_WAIT;
265	} else {
266		bh->b_end_io = end_buffer_read_sync;
267		get_bh(bh);
268		bhs[num++] = bh;
269	}
270
271	if (rahead) {
272		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
273
274		lock_buffer(bh);
275		if (buffer_uptodate(bh)) {
276			unlock_buffer(bh);
277			brelse(bh);
278		} else {
279			bh->b_end_io = end_buffer_read_sync;
280			bhs[num++] = bh;
281		}
282	}
283
284	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
285	if (!(flags & DIO_WAIT))
286		return 0;
287
288	bh = *bhp;
289	wait_on_buffer(bh);
290	if (unlikely(!buffer_uptodate(bh))) {
291		struct gfs2_trans *tr = current->journal_info;
292		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
293			gfs2_io_error_bh_wd(sdp, bh);
294		brelse(bh);
295		*bhp = NULL;
296		return -EIO;
297	}
298
299	return 0;
300}
301
302/**
303 * gfs2_meta_wait - Reread a block from disk
304 * @sdp: the filesystem
305 * @bh: The block to wait for
306 *
307 * Returns: errno
308 */
309
310int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
311{
312	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
313		return -EIO;
314
315	wait_on_buffer(bh);
316
317	if (!buffer_uptodate(bh)) {
318		struct gfs2_trans *tr = current->journal_info;
319		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
320			gfs2_io_error_bh_wd(sdp, bh);
321		return -EIO;
322	}
323	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
324		return -EIO;
325
326	return 0;
327}
328
329void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
330{
331	struct address_space *mapping = bh->b_page->mapping;
332	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
333	struct gfs2_bufdata *bd = bh->b_private;
334	struct gfs2_trans *tr = current->journal_info;
335	int was_pinned = 0;
336
337	if (test_clear_buffer_pinned(bh)) {
338		trace_gfs2_pin(bd, 0);
339		atomic_dec(&sdp->sd_log_pinned);
340		list_del_init(&bd->bd_list);
341		if (meta == REMOVE_META)
342			tr->tr_num_buf_rm++;
343		else
344			tr->tr_num_databuf_rm++;
345		set_bit(TR_TOUCHED, &tr->tr_flags);
346		was_pinned = 1;
347		brelse(bh);
348	}
349	if (bd) {
350		spin_lock(&sdp->sd_ail_lock);
351		if (bd->bd_tr) {
352			gfs2_trans_add_revoke(sdp, bd);
353		} else if (was_pinned) {
354			bh->b_private = NULL;
355			kmem_cache_free(gfs2_bufdata_cachep, bd);
356		}
357		spin_unlock(&sdp->sd_ail_lock);
358	}
359	clear_buffer_dirty(bh);
360	clear_buffer_uptodate(bh);
361}
362
363/**
364 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
365 * @ip: the inode who owns the buffers
366 * @bstart: the first buffer in the run
367 * @blen: the number of buffers in the run
368 *
369 */
370
371void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
372{
373	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
374	struct buffer_head *bh;
375
376	while (blen) {
377		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
378		if (bh) {
379			lock_buffer(bh);
380			gfs2_log_lock(sdp);
381			gfs2_remove_from_journal(bh, REMOVE_META);
382			gfs2_log_unlock(sdp);
383			unlock_buffer(bh);
384			brelse(bh);
385		}
386
387		bstart++;
388		blen--;
389	}
390}
391
392/**
393 * gfs2_meta_indirect_buffer - Get a metadata buffer
394 * @ip: The GFS2 inode
395 * @height: The level of this buf in the metadata (indir addr) tree (if any)
396 * @num: The block number (device relative) of the buffer
397 * @bhp: the buffer is returned here
398 *
399 * Returns: errno
400 */
401
402int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
403			      struct buffer_head **bhp)
404{
405	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
406	struct gfs2_glock *gl = ip->i_gl;
407	struct buffer_head *bh;
408	int ret = 0;
409	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
410	int rahead = 0;
411
412	if (num == ip->i_no_addr)
413		rahead = ip->i_rahead;
414
415	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
416	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
417		brelse(bh);
418		ret = -EIO;
419	} else {
420		*bhp = bh;
421	}
422	return ret;
423}
424
425/**
426 * gfs2_meta_ra - start readahead on an extent of a file
427 * @gl: the glock the blocks belong to
428 * @dblock: the starting disk block
429 * @extlen: the number of blocks in the extent
430 *
431 * returns: the first buffer in the extent
432 */
433
434struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
435{
436	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
437	struct buffer_head *first_bh, *bh;
438	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
439			  sdp->sd_sb.sb_bsize_shift;
440
441	BUG_ON(!extlen);
442
443	if (max_ra < 1)
444		max_ra = 1;
445	if (extlen > max_ra)
446		extlen = max_ra;
447
448	first_bh = gfs2_getbuf(gl, dblock, CREATE);
449
450	if (buffer_uptodate(first_bh))
451		goto out;
452	if (!buffer_locked(first_bh))
453		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);
454
455	dblock++;
456	extlen--;
457
458	while (extlen) {
459		bh = gfs2_getbuf(gl, dblock, CREATE);
460
461		if (!buffer_uptodate(bh) && !buffer_locked(bh))
462			ll_rw_block(REQ_OP_READ,
463				    REQ_RAHEAD | REQ_META | REQ_PRIO,
464				    1, &bh);
465		brelse(bh);
466		dblock++;
467		extlen--;
468		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
469			goto out;
470	}
471
472	wait_on_buffer(first_bh);
473out:
474	return first_bh;
475}
476