fs/gfs2/meta_io.c (kernel v3.1)
 
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/mm.h>
 16#include <linux/pagemap.h>
 17#include <linux/writeback.h>
 18#include <linux/swap.h>
 19#include <linux/delay.h>
 20#include <linux/bio.h>
 21#include <linux/gfs2_ondisk.h>
 22
 23#include "gfs2.h"
 24#include "incore.h"
 25#include "glock.h"
 26#include "glops.h"
 27#include "inode.h"
 28#include "log.h"
 29#include "lops.h"
 30#include "meta_io.h"
 31#include "rgrp.h"
 32#include "trans.h"
 33#include "util.h"
 34#include "trace_gfs2.h"
 35
 36static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 37{
 38	struct buffer_head *bh, *head;
 39	int nr_underway = 0;
 40	int write_op = REQ_META | REQ_PRIO |
 41		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 42
 43	BUG_ON(!PageLocked(page));
 44	BUG_ON(!page_has_buffers(page));
 45
 46	head = page_buffers(page);
 47	bh = head;
 48
 49	do {
 50		if (!buffer_mapped(bh))
 51			continue;
 52		/*
 53		 * If it's a fully non-blocking write attempt and we cannot
 54		 * lock the buffer then redirty the page.  Note that this can
 55		 * potentially cause a busy-wait loop from pdflush and kswapd
 56		 * activity, but those code paths have their own higher-level
 57		 * throttling.
 58		 */
 59		if (wbc->sync_mode != WB_SYNC_NONE) {
 60			lock_buffer(bh);
 61		} else if (!trylock_buffer(bh)) {
 62			redirty_page_for_writepage(wbc, page);
 63			continue;
 64		}
 65		if (test_clear_buffer_dirty(bh)) {
 66			mark_buffer_async_write(bh);
 67		} else {
 68			unlock_buffer(bh);
 69		}
 70	} while ((bh = bh->b_this_page) != head);
 71
 72	/*
 73	 * The page and its buffers are protected by PageWriteback(), so we can
 74	 * drop the bh refcounts early.
 75	 */
 76	BUG_ON(PageWriteback(page));
 77	set_page_writeback(page);
 78
 79	do {
 80		struct buffer_head *next = bh->b_this_page;
 81		if (buffer_async_write(bh)) {
 82			submit_bh(write_op, bh);
 83			nr_underway++;
 84		}
 85		bh = next;
 86	} while (bh != head);
 87	unlock_page(page);
 88
 89	if (nr_underway == 0)
 90		end_page_writeback(page);
 91
 92	return 0;
 93}
 94
 95const struct address_space_operations gfs2_meta_aops = {
 96	.writepage = gfs2_aspace_writepage,
 97	.releasepage = gfs2_releasepage,
 98};
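
/*
 * Illustrative sketch, not part of the original file: gfs2_meta_aops above is
 * the operations table for the per-glock metadata address space.  In this era
 * the wiring is done when the glock is created (glock.c); the hypothetical
 * helper below only shows the shape of that setup, and the mapping/sb names
 * are assumptions for the example.
 */
static void example_init_meta_mapping(struct address_space *mapping,
                                      struct super_block *sb)
{
        mapping->a_ops = &gfs2_meta_aops;       /* writepage/releasepage above */
        mapping->host = sb->s_bdev->bd_inode;   /* metadata pages are block-device backed */
        mapping_set_gfp_mask(mapping, GFP_NOFS);
}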
 99
100/**
101 * gfs2_meta_sync - Sync all buffers associated with a glock
102 * @gl: The glock
103 *
104 */
105
106void gfs2_meta_sync(struct gfs2_glock *gl)
107{
108	struct address_space *mapping = gfs2_glock2aspace(gl);
109	int error;
110
111	filemap_fdatawrite(mapping);
112	error = filemap_fdatawait(mapping);
113
114	if (error)
115		gfs2_io_error(gl->gl_sbd);
116}
117
118/**
119 * gfs2_getbuf - Get a buffer with a given address space
120 * @gl: the glock
121 * @blkno: the block number (filesystem scope)
122 * @create: 1 if the buffer should be created
123 *
124 * Returns: the buffer
125 */
126
127struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
128{
129	struct address_space *mapping = gfs2_glock2aspace(gl);
130	struct gfs2_sbd *sdp = gl->gl_sbd;
131	struct page *page;
132	struct buffer_head *bh;
133	unsigned int shift;
134	unsigned long index;
135	unsigned int bufnum;
136
137	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
138	index = blkno >> shift;             /* convert block to page */
139	bufnum = blkno - (index << shift);  /* block buf index within page */
140
141	if (create) {
142		for (;;) {
143			page = grab_cache_page(mapping, index);
144			if (page)
145				break;
146			yield();
147		}
148	} else {
149		page = find_lock_page(mapping, index);
150		if (!page)
151			return NULL;
152	}
153
154	if (!page_has_buffers(page))
155		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
156
157	/* Locate header for our buffer within our page */
158	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
159		/* Do nothing */;
160	get_bh(bh);
161
162	if (!buffer_mapped(bh))
163		map_bh(bh, sdp->sd_vfs, blkno);
164
165	unlock_page(page);
166	mark_page_accessed(page);
167	page_cache_release(page);
168
169	return bh;
170}
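
/*
 * Worked example of the index arithmetic in gfs2_getbuf() above, assuming
 * 4 KiB pages (PAGE_CACHE_SHIFT == 12) and a 1 KiB filesystem block size
 * (sb_bsize_shift == 10), i.e. four blocks per page:
 *
 *      shift  = 12 - 10       = 2
 *      blkno  = 11
 *      index  = 11 >> 2       = 2    (third page of the metadata mapping)
 *      bufnum = 11 - (2 << 2) = 3    (fourth buffer_head in that page)
 *
 * The "Do nothing" loop then follows b_this_page three times from the first
 * buffer in the page to reach the buffer covering on-disk block 11.
 */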
171
172static void meta_prep_new(struct buffer_head *bh)
173{
174	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
175
176	lock_buffer(bh);
177	clear_buffer_dirty(bh);
178	set_buffer_uptodate(bh);
179	unlock_buffer(bh);
180
181	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
182}
183
184/**
185 * gfs2_meta_new - Get a block
186 * @gl: The glock associated with this block
187 * @blkno: The block number
188 *
189 * Returns: The buffer
190 */
191
192struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
193{
194	struct buffer_head *bh;
195	bh = gfs2_getbuf(gl, blkno, CREATE);
196	meta_prep_new(bh);
197	return bh;
198}
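
/*
 * Minimal usage sketch, assuming the caller holds the glock and has an open
 * transaction: a freshly allocated indirect block is created, journaled and
 * stamped with its metadata type.  This mirrors the "new" branch of
 * gfs2_meta_indirect_buffer() below; the helper name is hypothetical.
 */
static struct buffer_head *example_new_indirect_block(struct gfs2_inode *ip,
                                                      u64 blkno)
{
        struct buffer_head *bh;

        bh = gfs2_meta_new(ip->i_gl, blkno);    /* uptodate, magic already set */
        gfs2_trans_add_bh(ip->i_gl, bh, 1);     /* add to the transaction as metadata */
        gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
        gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
        return bh;                              /* caller brelse()s when done */
}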
199
200/**
201 * gfs2_meta_read - Read a block from disk
202 * @gl: The glock covering the block
203 * @blkno: The block number
204 * @flags: flags
205 * @bhp: the place where the buffer is returned (NULL on failure)
206 *
207 * Returns: errno
208 */
209
210int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
211		   struct buffer_head **bhp)
212{
213	struct gfs2_sbd *sdp = gl->gl_sbd;
214	struct buffer_head *bh;
215
216	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
217		return -EIO;
218
219	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
220
221	lock_buffer(bh);
222	if (buffer_uptodate(bh)) {
223		unlock_buffer(bh);
224		return 0;
225	}
226	bh->b_end_io = end_buffer_read_sync;
227	get_bh(bh);
228	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
229	if (!(flags & DIO_WAIT))
230		return 0;
231
232	wait_on_buffer(bh);
233	if (unlikely(!buffer_uptodate(bh))) {
234		struct gfs2_trans *tr = current->journal_info;
235		if (tr && tr->tr_touched)
236			gfs2_io_error_bh(sdp, bh);
237		brelse(bh);
238		return -EIO;
239	}
240
241	return 0;
242}
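
/*
 * Minimal sketch of a synchronous metadata read, assuming the caller already
 * holds the glock in a compatible mode: read with DIO_WAIT, then verify the
 * on-disk metadata type.  This is essentially the read branch of
 * gfs2_meta_indirect_buffer() below; the helper name is hypothetical.
 */
static int example_read_dinode_block(struct gfs2_inode *ip, u64 blkno,
                                     struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;
        int error;

        error = gfs2_meta_read(ip->i_gl, blkno, DIO_WAIT, &bh);
        if (error)
                return error;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_DI)) {
                brelse(bh);
                return -EIO;
        }
        *bhp = bh;
        return 0;
}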
243
244/**
245 * gfs2_meta_wait - Reread a block from disk
246 * @sdp: the filesystem
247 * @bh: The block to wait for
248 *
249 * Returns: errno
250 */
251
252int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
253{
254	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
255		return -EIO;
256
257	wait_on_buffer(bh);
258
259	if (!buffer_uptodate(bh)) {
260		struct gfs2_trans *tr = current->journal_info;
261		if (tr && tr->tr_touched)
262			gfs2_io_error_bh(sdp, bh);
263		return -EIO;
264	}
265	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
266		return -EIO;
267
268	return 0;
269}
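
/*
 * Sketch of the split that gfs2_meta_wait() enables (hypothetical caller):
 * issue two reads without DIO_WAIT so the I/O overlaps, then wait for each
 * buffer afterwards.
 */
static int example_read_two_blocks(struct gfs2_inode *ip, u64 blk1, u64 blk2,
                                   struct buffer_head **bh1,
                                   struct buffer_head **bh2)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        error = gfs2_meta_read(ip->i_gl, blk1, 0, bh1);   /* no DIO_WAIT */
        if (error)
                return error;
        error = gfs2_meta_read(ip->i_gl, blk2, 0, bh2);
        if (error)
                goto out_bh1;

        error = gfs2_meta_wait(sdp, *bh1);
        if (error)
                goto out_bh2;
        error = gfs2_meta_wait(sdp, *bh2);
        if (error)
                goto out_bh2;
        return 0;

out_bh2:
        brelse(*bh2);
out_bh1:
        brelse(*bh1);
        return error;
}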
270
271/**
272 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
273 * @gl: the glock the buffer belongs to
274 * @bh: The buffer to be attached to
 275 * @meta: Flag to indicate whether it's metadata or not
276 */
277
278void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
279			 int meta)
280{
281	struct gfs2_bufdata *bd;
282
283	if (meta)
284		lock_page(bh->b_page);
285
286	if (bh->b_private) {
287		if (meta)
288			unlock_page(bh->b_page);
289		return;
290	}
291
292	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
293	bd->bd_bh = bh;
294	bd->bd_gl = gl;
295
296	INIT_LIST_HEAD(&bd->bd_list_tr);
297	if (meta)
298		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
299	else
300		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
301	bh->b_private = bd;
302
303	if (meta)
304		unlock_page(bh->b_page);
305}
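
/*
 * Sketch of where the attach happens in practice, based on the trans.c code
 * of this era (an assumption worth checking): gfs2_trans_add_bh() attaches a
 * gfs2_bufdata through this helper the first time a buffer joins a
 * transaction.  The helper below is hypothetical.
 */
static int example_modify_metadata(struct gfs2_inode *ip, u64 blkno)
{
        struct buffer_head *bh;
        int error;

        error = gfs2_meta_read(ip->i_gl, blkno, DIO_WAIT, &bh);
        if (error)
                return error;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);     /* bufdata attached on first use */
        /* ... update bh->b_data under the transaction ... */
        brelse(bh);
        return 0;
}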
306
307void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
308{
309	struct address_space *mapping = bh->b_page->mapping;
310	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
311	struct gfs2_bufdata *bd = bh->b_private;
312
313	if (test_clear_buffer_pinned(bh)) {
314		trace_gfs2_pin(bd, 0);
315		atomic_dec(&sdp->sd_log_pinned);
316		list_del_init(&bd->bd_le.le_list);
317		if (meta) {
318			gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
319			sdp->sd_log_num_buf--;
320			tr->tr_num_buf_rm++;
321		} else {
322			gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
323			sdp->sd_log_num_databuf--;
324			tr->tr_num_databuf_rm++;
325		}
326		tr->tr_touched = 1;
327		brelse(bh);
328	}
329	if (bd) {
330		spin_lock(&sdp->sd_ail_lock);
331		if (bd->bd_ail) {
332			gfs2_remove_from_ail(bd);
333			bh->b_private = NULL;
334			bd->bd_bh = NULL;
335			bd->bd_blkno = bh->b_blocknr;
336			gfs2_trans_add_revoke(sdp, bd);
337		}
338		spin_unlock(&sdp->sd_ail_lock);
339	}
340	clear_buffer_dirty(bh);
341	clear_buffer_uptodate(bh);
342}
343
344/**
 345 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
346 * @ip: the inode who owns the buffers
347 * @bstart: the first buffer in the run
348 * @blen: the number of buffers in the run
349 *
350 */
351
352void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
353{
354	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
355	struct buffer_head *bh;
356
357	while (blen) {
358		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
359		if (bh) {
360			lock_buffer(bh);
361			gfs2_log_lock(sdp);
362			gfs2_remove_from_journal(bh, current->journal_info, 1);
363			gfs2_log_unlock(sdp);
364			unlock_buffer(bh);
365			brelse(bh);
366		}
367
368		bstart++;
369		blen--;
370	}
371}
372
373/**
374 * gfs2_meta_indirect_buffer - Get a metadata buffer
375 * @ip: The GFS2 inode
376 * @height: The level of this buf in the metadata (indir addr) tree (if any)
377 * @num: The block number (device relative) of the buffer
378 * @new: Non-zero if we may create a new buffer
379 * @bhp: the buffer is returned here
380 *
381 * Returns: errno
382 */
383
384int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
385			      int new, struct buffer_head **bhp)
386{
387	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
388	struct gfs2_glock *gl = ip->i_gl;
389	struct buffer_head *bh;
390	int ret = 0;
391
392	if (new) {
393		BUG_ON(height == 0);
394		bh = gfs2_meta_new(gl, num);
395		gfs2_trans_add_bh(ip->i_gl, bh, 1);
396		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
397		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
398	} else {
399		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
400		ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
401		if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
402			brelse(bh);
403			ret = -EIO;
404		}
405	}
406	*bhp = bh;
407	return ret;
408}
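
/*
 * Sketch of the common "height 0" case: reading the inode's own dinode block
 * and checking it carries GFS2_METATYPE_DI.  In this era meta_io.h wraps this
 * pattern as gfs2_meta_inode_buffer(); the helper below just spells it out.
 */
static int example_read_dinode(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        /* height 0 => expect GFS2_METATYPE_DI; new == 0 => read, do not create */
        return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, 0, bhp);
}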
409
410/**
411 * gfs2_meta_ra - start readahead on an extent of a file
412 * @gl: the glock the blocks belong to
413 * @dblock: the starting disk block
414 * @extlen: the number of blocks in the extent
415 *
416 * returns: the first buffer in the extent
417 */
418
419struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
420{
421	struct gfs2_sbd *sdp = gl->gl_sbd;
422	struct buffer_head *first_bh, *bh;
423	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
424			  sdp->sd_sb.sb_bsize_shift;
425
426	BUG_ON(!extlen);
427
428	if (max_ra < 1)
429		max_ra = 1;
430	if (extlen > max_ra)
431		extlen = max_ra;
432
433	first_bh = gfs2_getbuf(gl, dblock, CREATE);
434
435	if (buffer_uptodate(first_bh))
436		goto out;
437	if (!buffer_locked(first_bh))
438		ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
439
440	dblock++;
441	extlen--;
442
443	while (extlen) {
444		bh = gfs2_getbuf(gl, dblock, CREATE);
445
446		if (!buffer_uptodate(bh) && !buffer_locked(bh))
447			ll_rw_block(READA, 1, &bh);
448		brelse(bh);
449		dblock++;
450		extlen--;
451		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
452			goto out;
453	}
454
455	wait_on_buffer(first_bh);
456out:
457	return first_bh;
458}
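
/*
 * Hypothetical caller of gfs2_meta_ra(): once a mapping lookup shows that
 * `extlen` metadata blocks are contiguous on disk, start readahead for the
 * whole run but wait only for the first block.
 */
static struct buffer_head *example_walk_extent(struct gfs2_glock *gl,
                                               u64 dblock, u32 extlen)
{
        struct buffer_head *bh;

        bh = gfs2_meta_ra(gl, dblock, extlen);  /* waits on dblock only */
        if (!buffer_uptodate(bh)) {             /* I/O error on the first block */
                brelse(bh);
                return NULL;
        }
        return bh;
}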
459
fs/gfs2/meta_io.c (kernel v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  5 */
  6
  7#include <linux/sched.h>
  8#include <linux/slab.h>
  9#include <linux/spinlock.h>
 10#include <linux/completion.h>
 11#include <linux/buffer_head.h>
 12#include <linux/mm.h>
 13#include <linux/pagemap.h>
 14#include <linux/writeback.h>
 15#include <linux/swap.h>
 16#include <linux/delay.h>
 17#include <linux/bio.h>
 18#include <linux/gfs2_ondisk.h>
 19
 20#include "gfs2.h"
 21#include "incore.h"
 22#include "glock.h"
 23#include "glops.h"
 24#include "inode.h"
 25#include "log.h"
 26#include "lops.h"
 27#include "meta_io.h"
 28#include "rgrp.h"
 29#include "trans.h"
 30#include "util.h"
 31#include "trace_gfs2.h"
 32
 33static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 34{
 35	struct buffer_head *bh, *head;
 36	int nr_underway = 0;
 37	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
 38
 39	BUG_ON(!PageLocked(page));
 40	BUG_ON(!page_has_buffers(page));
 41
 42	head = page_buffers(page);
 43	bh = head;
 44
 45	do {
 46		if (!buffer_mapped(bh))
 47			continue;
 48		/*
 49		 * If it's a fully non-blocking write attempt and we cannot
 50		 * lock the buffer then redirty the page.  Note that this can
  51	 * potentially cause a busy-wait loop from the flusher threads and kswapd
 52		 * activity, but those code paths have their own higher-level
 53		 * throttling.
 54		 */
 55		if (wbc->sync_mode != WB_SYNC_NONE) {
 56			lock_buffer(bh);
 57		} else if (!trylock_buffer(bh)) {
 58			redirty_page_for_writepage(wbc, page);
 59			continue;
 60		}
 61		if (test_clear_buffer_dirty(bh)) {
 62			mark_buffer_async_write(bh);
 63		} else {
 64			unlock_buffer(bh);
 65		}
 66	} while ((bh = bh->b_this_page) != head);
 67
 68	/*
 69	 * The page and its buffers are protected by PageWriteback(), so we can
 70	 * drop the bh refcounts early.
 71	 */
 72	BUG_ON(PageWriteback(page));
 73	set_page_writeback(page);
 74
 75	do {
 76		struct buffer_head *next = bh->b_this_page;
 77		if (buffer_async_write(bh)) {
 78			submit_bh(REQ_OP_WRITE, write_flags, bh);
 79			nr_underway++;
 80		}
 81		bh = next;
 82	} while (bh != head);
 83	unlock_page(page);
 84
 85	if (nr_underway == 0)
 86		end_page_writeback(page);
 87
 88	return 0;
 89}
 90
 91const struct address_space_operations gfs2_meta_aops = {
 92	.writepage = gfs2_aspace_writepage,
 93	.releasepage = gfs2_releasepage,
 94};
 95
 96const struct address_space_operations gfs2_rgrp_aops = {
 97	.writepage = gfs2_aspace_writepage,
 98	.releasepage = gfs2_releasepage,
 99};
100
101/**
102 * gfs2_getbuf - Get a buffer with a given address space
103 * @gl: the glock
104 * @blkno: the block number (filesystem scope)
105 * @create: 1 if the buffer should be created
106 *
107 * Returns: the buffer
108 */
109
110struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
111{
112	struct address_space *mapping = gfs2_glock2aspace(gl);
113	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
114	struct page *page;
115	struct buffer_head *bh;
116	unsigned int shift;
117	unsigned long index;
118	unsigned int bufnum;
119
120	if (mapping == NULL)
121		mapping = &sdp->sd_aspace;
122
123	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
124	index = blkno >> shift;             /* convert block to page */
125	bufnum = blkno - (index << shift);  /* block buf index within page */
126
127	if (create) {
128		for (;;) {
129			page = grab_cache_page(mapping, index);
130			if (page)
131				break;
132			yield();
133		}
134	} else {
135		page = find_get_page_flags(mapping, index,
136						FGP_LOCK|FGP_ACCESSED);
137		if (!page)
138			return NULL;
139	}
140
141	if (!page_has_buffers(page))
142		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
143
144	/* Locate header for our buffer within our page */
145	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
146		/* Do nothing */;
147	get_bh(bh);
148
149	if (!buffer_mapped(bh))
150		map_bh(bh, sdp->sd_vfs, blkno);
151
152	unlock_page(page);
153	put_page(page);
154
155	return bh;
156}
157
158static void meta_prep_new(struct buffer_head *bh)
159{
160	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
161
162	lock_buffer(bh);
163	clear_buffer_dirty(bh);
164	set_buffer_uptodate(bh);
165	unlock_buffer(bh);
166
167	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
168}
169
170/**
171 * gfs2_meta_new - Get a block
172 * @gl: The glock associated with this block
173 * @blkno: The block number
174 *
175 * Returns: The buffer
176 */
177
178struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
179{
180	struct buffer_head *bh;
181	bh = gfs2_getbuf(gl, blkno, CREATE);
182	meta_prep_new(bh);
183	return bh;
184}
185
186static void gfs2_meta_read_endio(struct bio *bio)
187{
188	struct bio_vec *bvec;
189	struct bvec_iter_all iter_all;
190
191	bio_for_each_segment_all(bvec, bio, iter_all) {
192		struct page *page = bvec->bv_page;
193		struct buffer_head *bh = page_buffers(page);
194		unsigned int len = bvec->bv_len;
195
196		while (bh_offset(bh) < bvec->bv_offset)
197			bh = bh->b_this_page;
198		do {
199			struct buffer_head *next = bh->b_this_page;
200			len -= bh->b_size;
201			bh->b_end_io(bh, !bio->bi_status);
202			bh = next;
203		} while (bh && len);
204	}
205	bio_put(bio);
206}
207
208/*
209 * Submit several consecutive buffer head I/O requests as a single bio I/O
210 * request.  (See submit_bh_wbc.)
211 */
212static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
213			    int num)
214{
215	while (num > 0) {
216		struct buffer_head *bh = *bhs;
217		struct bio *bio;
218
219		bio = bio_alloc(GFP_NOIO, num);
220		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
221		bio_set_dev(bio, bh->b_bdev);
222		while (num > 0) {
223			bh = *bhs;
224			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
225				BUG_ON(bio->bi_iter.bi_size == 0);
226				break;
227			}
228			bhs++;
229			num--;
230		}
231		bio->bi_end_io = gfs2_meta_read_endio;
232		bio_set_op_attrs(bio, op, op_flags);
233		submit_bio(bio);
234	}
235}
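
/*
 * Worked example of the sector arithmetic above, assuming a 4 KiB filesystem
 * block size (bh->b_size == 4096):
 *
 *      bi_sector = b_blocknr * (4096 >> 9) = b_blocknr * 8
 *
 * so filesystem block 100 starts at 512-byte sector 800.  bio_add_page() then
 * appends each following buffer while it still fits in the bio; when it does
 * not, the outer loop allocates a fresh bio for the remainder, and the BUG_ON
 * catches a bio that could not take even its first buffer.
 */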
236
237/**
238 * gfs2_meta_read - Read a block from disk
239 * @gl: The glock covering the block
240 * @blkno: The block number
241 * @flags: flags
242 * @bhp: the place where the buffer is returned (NULL on failure)
243 *
244 * Returns: errno
245 */
246
247int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
248		   int rahead, struct buffer_head **bhp)
249{
250	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
251	struct buffer_head *bh, *bhs[2];
252	int num = 0;
253
254	if (unlikely(gfs2_withdrawn(sdp)) &&
255	    (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
256		*bhp = NULL;
257		return -EIO;
258	}
259
260	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
261
262	lock_buffer(bh);
263	if (buffer_uptodate(bh)) {
264		unlock_buffer(bh);
265		flags &= ~DIO_WAIT;
266	} else {
267		bh->b_end_io = end_buffer_read_sync;
268		get_bh(bh);
269		bhs[num++] = bh;
270	}
271
272	if (rahead) {
273		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
274
275		lock_buffer(bh);
276		if (buffer_uptodate(bh)) {
277			unlock_buffer(bh);
278			brelse(bh);
279		} else {
280			bh->b_end_io = end_buffer_read_sync;
281			bhs[num++] = bh;
282		}
283	}
284
285	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
286	if (!(flags & DIO_WAIT))
287		return 0;
288
289	bh = *bhp;
290	wait_on_buffer(bh);
291	if (unlikely(!buffer_uptodate(bh))) {
292		struct gfs2_trans *tr = current->journal_info;
293		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
294			gfs2_io_error_bh_wd(sdp, bh);
295		brelse(bh);
296		*bhp = NULL;
297		return -EIO;
298	}
299
300	return 0;
301}
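
/*
 * Minimal sketch of a caller using the rahead argument added in this version
 * (compare the v3.1 prototype above, which has no such argument): a non-zero
 * rahead makes gfs2_meta_read() pull in the block after blkno in the same
 * submission, as gfs2_meta_indirect_buffer() below does via ip->i_rahead.
 * The helper name is hypothetical.
 */
static int example_read_with_rahead(struct gfs2_inode *ip, u64 blkno,
                                    struct buffer_head **bhp)
{
        return gfs2_meta_read(ip->i_gl, blkno, DIO_WAIT, 1, bhp);
}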
302
303/**
304 * gfs2_meta_wait - Reread a block from disk
305 * @sdp: the filesystem
306 * @bh: The block to wait for
307 *
308 * Returns: errno
309 */
310
311int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
312{
313	if (unlikely(gfs2_withdrawn(sdp)))
314		return -EIO;
315
316	wait_on_buffer(bh);
317
318	if (!buffer_uptodate(bh)) {
319		struct gfs2_trans *tr = current->journal_info;
320		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
321			gfs2_io_error_bh_wd(sdp, bh);
322		return -EIO;
323	}
324	if (unlikely(gfs2_withdrawn(sdp)))
325		return -EIO;
326
327	return 0;
328}
329
330void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
331{
332	struct address_space *mapping = bh->b_page->mapping;
333	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
334	struct gfs2_bufdata *bd = bh->b_private;
335	struct gfs2_trans *tr = current->journal_info;
336	int was_pinned = 0;
337
338	if (test_clear_buffer_pinned(bh)) {
339		trace_gfs2_pin(bd, 0);
340		atomic_dec(&sdp->sd_log_pinned);
341		list_del_init(&bd->bd_list);
342		if (meta == REMOVE_META)
343			tr->tr_num_buf_rm++;
344		else
345			tr->tr_num_databuf_rm++;
346		set_bit(TR_TOUCHED, &tr->tr_flags);
347		was_pinned = 1;
348		brelse(bh);
349	}
350	if (bd) {
351		spin_lock(&sdp->sd_ail_lock);
352		if (bd->bd_tr) {
353			gfs2_trans_add_revoke(sdp, bd);
354		} else if (was_pinned) {
355			bh->b_private = NULL;
356			kmem_cache_free(gfs2_bufdata_cachep, bd);
357		}
358		spin_unlock(&sdp->sd_ail_lock);
359	}
360	clear_buffer_dirty(bh);
361	clear_buffer_uptodate(bh);
362}
363
364/**
 365 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
366 * @ip: the inode who owns the buffers
367 * @bstart: the first buffer in the run
368 * @blen: the number of buffers in the run
369 *
370 */
371
372void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
373{
374	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
375	struct buffer_head *bh;
376
377	while (blen) {
378		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
379		if (bh) {
380			lock_buffer(bh);
381			gfs2_log_lock(sdp);
382			gfs2_remove_from_journal(bh, REMOVE_META);
383			gfs2_log_unlock(sdp);
384			unlock_buffer(bh);
385			brelse(bh);
386		}
387
388		bstart++;
389		blen--;
390	}
391}
392
393/**
394 * gfs2_meta_indirect_buffer - Get a metadata buffer
395 * @ip: The GFS2 inode
396 * @height: The level of this buf in the metadata (indir addr) tree (if any)
397 * @num: The block number (device relative) of the buffer
398 * @bhp: the buffer is returned here
399 *
400 * Returns: errno
401 */
402
403int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
404			      struct buffer_head **bhp)
405{
406	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
407	struct gfs2_glock *gl = ip->i_gl;
408	struct buffer_head *bh;
409	int ret = 0;
410	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
411	int rahead = 0;
412
413	if (num == ip->i_no_addr)
414		rahead = ip->i_rahead;
415
416	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
417	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
418		brelse(bh);
419		ret = -EIO;
420	} else {
421		*bhp = bh;
422	}
423	return ret;
424}
425
426/**
427 * gfs2_meta_ra - start readahead on an extent of a file
428 * @gl: the glock the blocks belong to
429 * @dblock: the starting disk block
430 * @extlen: the number of blocks in the extent
431 *
432 * returns: the first buffer in the extent
433 */
434
435struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
436{
437	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
438	struct buffer_head *first_bh, *bh;
439	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
440			  sdp->sd_sb.sb_bsize_shift;
441
442	BUG_ON(!extlen);
443
444	if (max_ra < 1)
445		max_ra = 1;
446	if (extlen > max_ra)
447		extlen = max_ra;
448
449	first_bh = gfs2_getbuf(gl, dblock, CREATE);
450
451	if (buffer_uptodate(first_bh))
452		goto out;
453	if (!buffer_locked(first_bh))
454		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);
455
456	dblock++;
457	extlen--;
458
459	while (extlen) {
460		bh = gfs2_getbuf(gl, dblock, CREATE);
461
462		if (!buffer_uptodate(bh) && !buffer_locked(bh))
463			ll_rw_block(REQ_OP_READ,
464				    REQ_RAHEAD | REQ_META | REQ_PRIO,
465				    1, &bh);
466		brelse(bh);
467		dblock++;
468		extlen--;
469		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
470			goto out;
471	}
472
473	wait_on_buffer(first_bh);
474out:
475	return first_bh;
476}
477