v4.6
 
  1/*
  2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4 *
  5 * This copyrighted material is made available to anyone wishing to use,
  6 * modify, copy, or redistribute it subject to the terms and conditions
  7 * of the GNU General Public License version 2.
  8 */
  9
 10#include <linux/sched.h>
 11#include <linux/slab.h>
 12#include <linux/spinlock.h>
 13#include <linux/completion.h>
 14#include <linux/buffer_head.h>
 15#include <linux/mm.h>
 16#include <linux/pagemap.h>
 17#include <linux/writeback.h>
 18#include <linux/swap.h>
 19#include <linux/delay.h>
 20#include <linux/bio.h>
 21#include <linux/gfs2_ondisk.h>
 22
 23#include "gfs2.h"
 24#include "incore.h"
 25#include "glock.h"
 26#include "glops.h"
 27#include "inode.h"
 28#include "log.h"
 29#include "lops.h"
 30#include "meta_io.h"
 31#include "rgrp.h"
 32#include "trans.h"
 33#include "util.h"
 34#include "trace_gfs2.h"
 35
  36static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
  37{
 38	struct buffer_head *bh, *head;
 39	int nr_underway = 0;
 40	int write_op = REQ_META | REQ_PRIO |
 41		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 42
 43	BUG_ON(!PageLocked(page));
 44	BUG_ON(!page_has_buffers(page));
 45
 46	head = page_buffers(page);
 47	bh = head;
 48
 49	do {
 50		if (!buffer_mapped(bh))
 51			continue;
 52		/*
 53		 * If it's a fully non-blocking write attempt and we cannot
 54		 * lock the buffer then redirty the page.  Note that this can
 55		 * potentially cause a busy-wait loop from flusher thread and kswapd
 56		 * activity, but those code paths have their own higher-level
 57		 * throttling.
 58		 */
 59		if (wbc->sync_mode != WB_SYNC_NONE) {
 60			lock_buffer(bh);
 61		} else if (!trylock_buffer(bh)) {
 62			redirty_page_for_writepage(wbc, page);
 63			continue;
 64		}
 65		if (test_clear_buffer_dirty(bh)) {
 66			mark_buffer_async_write(bh);
 67		} else {
 68			unlock_buffer(bh);
 69		}
 70	} while ((bh = bh->b_this_page) != head);
 71
 72	/*
 73	 * The page and its buffers are protected by PageWriteback(), so we can
 74	 * drop the bh refcounts early.
 75	 */
 76	BUG_ON(PageWriteback(page));
 77	set_page_writeback(page);
 78
 79	do {
 80		struct buffer_head *next = bh->b_this_page;
 81		if (buffer_async_write(bh)) {
 82			submit_bh(write_op, bh);
 83			nr_underway++;
 84		}
 85		bh = next;
 86	} while (bh != head);
 87	unlock_page(page);
 88
 89	if (nr_underway == 0)
  90		end_page_writeback(page);
  91
  92	return 0;
  93}
 94
 95const struct address_space_operations gfs2_meta_aops = {
 96	.writepage = gfs2_aspace_writepage,
 97	.releasepage = gfs2_releasepage,
 98};
 99
100const struct address_space_operations gfs2_rgrp_aops = {
101	.writepage = gfs2_aspace_writepage,
102	.releasepage = gfs2_releasepage,
103};
104
105/**
106 * gfs2_getbuf - Get a buffer with a given address space
107 * @gl: the glock
108 * @blkno: the block number (filesystem scope)
109 * @create: 1 if the buffer should be created
110 *
111 * Returns: the buffer
112 */
113
114struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
115{
116	struct address_space *mapping = gfs2_glock2aspace(gl);
117	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
118	struct page *page;
119	struct buffer_head *bh;
120	unsigned int shift;
121	unsigned long index;
122	unsigned int bufnum;
123
124	if (mapping == NULL)
125		mapping = &sdp->sd_aspace;
126
127	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
128	index = blkno >> shift;             /* convert block to page */
129	bufnum = blkno - (index << shift);  /* block buf index within page */
130
131	if (create) {
132		for (;;) {
133			page = grab_cache_page(mapping, index);
134			if (page)
135				break;
136			yield();
137		}
138	} else {
139		page = find_get_page_flags(mapping, index,
140						FGP_LOCK|FGP_ACCESSED);
141		if (!page)
142			return NULL;
143	}
144
145	if (!page_has_buffers(page))
146		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
147
148	/* Locate header for our buffer within our page */
149	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
150		/* Do nothing */;
151	get_bh(bh);
152
153	if (!buffer_mapped(bh))
154		map_bh(bh, sdp->sd_vfs, blkno);
155
156	unlock_page(page);
157	put_page(page);
158
159	return bh;
160}
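
The shift/index/bufnum arithmetic above maps a filesystem block number to a page in the address space and to a buffer slot within that page. A minimal userspace sketch of the same math, assuming 4 KiB pages and a 1 KiB filesystem block size (both values are illustrative, not taken from the listing):

#include <stdio.h>

/* Illustrative values: 4 KiB pages (PAGE_SHIFT 12), 1 KiB blocks (shift 10). */
#define PAGE_SHIFT	12
#define BSIZE_SHIFT	10

int main(void)
{
	unsigned long long blkno = 12345;		/* filesystem block number */
	unsigned int shift = PAGE_SHIFT - BSIZE_SHIFT;	/* log2(blocks per page) */
	unsigned long index = blkno >> shift;		/* page index in the mapping */
	unsigned int bufnum = blkno - (index << shift);	/* buffer slot within the page */

	/* 12345 = 3086 * 4 + 1: page 3086, buffer 1 of the 4 per page */
	printf("page %lu, buffer %u of %u\n", index, bufnum, 1u << shift);
	return 0;
}
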
161
162static void meta_prep_new(struct buffer_head *bh)
163{
164	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
165
166	lock_buffer(bh);
167	clear_buffer_dirty(bh);
168	set_buffer_uptodate(bh);
169	unlock_buffer(bh);
170
171	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
172}
173
174/**
175 * gfs2_meta_new - Get a block
176 * @gl: The glock associated with this block
177 * @blkno: The block number
178 *
179 * Returns: The buffer
180 */
181
182struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
183{
184	struct buffer_head *bh;
185	bh = gfs2_getbuf(gl, blkno, CREATE);
186	meta_prep_new(bh);
187	return bh;
188}
189
190static void gfs2_meta_read_endio(struct bio *bio)
191{
192	struct bio_vec *bvec;
193	int i;
194
195	bio_for_each_segment_all(bvec, bio, i) {
196		struct page *page = bvec->bv_page;
197		struct buffer_head *bh = page_buffers(page);
198		unsigned int len = bvec->bv_len;
199
200		while (bh_offset(bh) < bvec->bv_offset)
201			bh = bh->b_this_page;
202		do {
203			struct buffer_head *next = bh->b_this_page;
204			len -= bh->b_size;
205			bh->b_end_io(bh, !bio->bi_error);
206			bh = next;
207		} while (bh && len);
208	}
209	bio_put(bio);
210}
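
The completion handler above first advances through the page's buffer ring until it reaches the buffer at the segment's starting offset, then calls b_end_io on each buffer the segment covers. A userspace sketch of that walk, assuming 1 KiB buffers in a 4 KiB page (illustrative values):

#include <stdio.h>

#define BSIZE 1024	/* assumed buffer (block) size */

int main(void)
{
	unsigned int bv_offset = 2048;	/* segment start within the page */
	unsigned int bv_len = 2048;	/* segment length: covers two buffers */
	unsigned int off = 0, len = bv_len;

	while (off < bv_offset)		/* skip buffers before the segment */
		off += BSIZE;
	do {				/* complete each covered buffer */
		printf("b_end_io for buffer at offset %u\n", off);
		off += BSIZE;
		len -= BSIZE;
	} while (len);
	return 0;
}
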
211
212/*
213 * Submit several consecutive buffer head I/O requests as a single bio I/O
214 * request.  (See submit_bh_wbc.)
215 */
216static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
217{
218	struct buffer_head *bh = bhs[0];
219	struct bio *bio;
220	int i;
221
222	if (!num)
223		return;
224
225	bio = bio_alloc(GFP_NOIO, num);
226	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
227	bio->bi_bdev = bh->b_bdev;
228	for (i = 0; i < num; i++) {
229		bh = bhs[i];
230		bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
231	}
232	bio->bi_end_io = gfs2_meta_read_endio;
233	submit_bio(rw, bio);
234}
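
bi_sector above is the block number scaled to 512-byte sectors: b_size >> 9 gives the number of sectors per block. A standalone check of the conversion, assuming 4 KiB blocks (an illustrative value):

#include <stdio.h>

int main(void)
{
	unsigned long long b_blocknr = 100;	/* device-relative block number */
	unsigned int b_size = 4096;		/* bytes per block (assumed) */

	/* 4096 >> 9 = 8 sectors per block, so block 100 starts at sector 800 */
	unsigned long long bi_sector = b_blocknr * (b_size >> 9);

	printf("block %llu -> sector %llu\n", b_blocknr, bi_sector);
	return 0;
}
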
235
236/**
237 * gfs2_meta_read - Read a block from disk
238 * @gl: The glock covering the block
239 * @blkno: The block number
240 * @flags: flags
241 * @bhp: the place where the buffer is returned (NULL on failure)
242 *
243 * Returns: errno
244 */
245
246int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
247		   int rahead, struct buffer_head **bhp)
248{
249	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
250	struct buffer_head *bh, *bhs[2];
251	int num = 0;
252
253	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
254		*bhp = NULL;
255		return -EIO;
256	}
257
258	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
259
260	lock_buffer(bh);
261	if (buffer_uptodate(bh)) {
262		unlock_buffer(bh);
263		flags &= ~DIO_WAIT;
264	} else {
265		bh->b_end_io = end_buffer_read_sync;
266		get_bh(bh);
267		bhs[num++] = bh;
268	}
269
270	if (rahead) {
271		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
272
273		lock_buffer(bh);
274		if (buffer_uptodate(bh)) {
275			unlock_buffer(bh);
276			brelse(bh);
277		} else {
278			bh->b_end_io = end_buffer_read_sync;
279			bhs[num++] = bh;
280		}
281	}
282
283	gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
284	if (!(flags & DIO_WAIT))
285		return 0;
286
287	bh = *bhp;
288	wait_on_buffer(bh);
289	if (unlikely(!buffer_uptodate(bh))) {
290		struct gfs2_trans *tr = current->journal_info;
291		if (tr && tr->tr_touched)
292			gfs2_io_error_bh(sdp, bh);
293		brelse(bh);
294		*bhp = NULL;
295		return -EIO;
296	}
297
298	return 0;
299}
300
301/**
302 * gfs2_meta_wait - Reread a block from disk
303 * @sdp: the filesystem
304 * @bh: The block to wait for
305 *
306 * Returns: errno
307 */
308
309int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
310{
311	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
312		return -EIO;
313
314	wait_on_buffer(bh);
315
316	if (!buffer_uptodate(bh)) {
317		struct gfs2_trans *tr = current->journal_info;
318		if (tr && tr->tr_touched)
319			gfs2_io_error_bh(sdp, bh);
320		return -EIO;
321	}
322	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
323		return -EIO;
324
325	return 0;
326}
327
328void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
329{
330	struct address_space *mapping = bh->b_page->mapping;
331	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
332	struct gfs2_bufdata *bd = bh->b_private;
333	int was_pinned = 0;
334
335	if (test_clear_buffer_pinned(bh)) {
336		trace_gfs2_pin(bd, 0);
337		atomic_dec(&sdp->sd_log_pinned);
338		list_del_init(&bd->bd_list);
339		if (meta)
340			tr->tr_num_buf_rm++;
341		else
342			tr->tr_num_databuf_rm++;
343		tr->tr_touched = 1;
344		was_pinned = 1;
345		brelse(bh);
346	}
347	if (bd) {
348		spin_lock(&sdp->sd_ail_lock);
349		if (bd->bd_tr) {
350			gfs2_trans_add_revoke(sdp, bd);
351		} else if (was_pinned) {
352			bh->b_private = NULL;
353			kmem_cache_free(gfs2_bufdata_cachep, bd);
354		}
355		spin_unlock(&sdp->sd_ail_lock);
356	}
357	clear_buffer_dirty(bh);
358	clear_buffer_uptodate(bh);
359}
360
361/**
362 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
363 * @ip: the inode who owns the buffers
364 * @bstart: the first buffer in the run
365 * @blen: the number of buffers in the run
366 *
367 */
368
369void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
370{
371	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
372	struct buffer_head *bh;
373
374	while (blen) {
375		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
376		if (bh) {
377			lock_buffer(bh);
378			gfs2_log_lock(sdp);
379			gfs2_remove_from_journal(bh, current->journal_info, 1);
380			gfs2_log_unlock(sdp);
381			unlock_buffer(bh);
382			brelse(bh);
383		}
384
385		bstart++;
386		blen--;
387	}
388}
389
390/**
391 * gfs2_meta_indirect_buffer - Get a metadata buffer
392 * @ip: The GFS2 inode
393 * @height: The level of this buf in the metadata (indir addr) tree (if any)
394 * @num: The block number (device relative) of the buffer
395 * @bhp: the buffer is returned here
396 *
397 * Returns: errno
398 */
399
400int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
401			      struct buffer_head **bhp)
402{
403	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
404	struct gfs2_glock *gl = ip->i_gl;
405	struct buffer_head *bh;
406	int ret = 0;
407	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
408	int rahead = 0;
409
410	if (num == ip->i_no_addr)
411		rahead = ip->i_rahead;
412
413	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
414	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
415		brelse(bh);
416		ret = -EIO;
417	}
418	*bhp = bh;
419	return ret;
420}
421
422/**
423 * gfs2_meta_ra - start readahead on an extent of a file
424 * @gl: the glock the blocks belong to
425 * @dblock: the starting disk block
426 * @extlen: the number of blocks in the extent
427 *
428 * returns: the first buffer in the extent
429 */
430
431struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
432{
433	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
434	struct buffer_head *first_bh, *bh;
435	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
436			  sdp->sd_sb.sb_bsize_shift;
437
438	BUG_ON(!extlen);
439
440	if (max_ra < 1)
441		max_ra = 1;
442	if (extlen > max_ra)
443		extlen = max_ra;
444
445	first_bh = gfs2_getbuf(gl, dblock, CREATE);
446
447	if (buffer_uptodate(first_bh))
448		goto out;
449	if (!buffer_locked(first_bh))
450		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
451
452	dblock++;
453	extlen--;
454
455	while (extlen) {
456		bh = gfs2_getbuf(gl, dblock, CREATE);
457
458		if (!buffer_uptodate(bh) && !buffer_locked(bh))
459			ll_rw_block(READA | REQ_META, 1, &bh);
460		brelse(bh);
461		dblock++;
462		extlen--;
463		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
464			goto out;
465	}
466
467	wait_on_buffer(first_bh);
468out:
469	return first_bh;
470}
471
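
In gfs2_meta_ra (present in both versions shown here), the gt_max_readahead tunable is a byte count, so shifting it right by sb_bsize_shift converts it to blocks before the extent is clamped. A small sketch of that clamp with made-up values (a 131072-byte tunable and 4 KiB blocks, both assumptions for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int gt_max_readahead = 131072;	/* bytes (illustrative tunable) */
	unsigned int sb_bsize_shift = 12;	/* 4 KiB blocks (assumed) */
	unsigned int extlen = 100;		/* requested extent, in blocks */

	unsigned int max_ra = gt_max_readahead >> sb_bsize_shift; /* 32 blocks */
	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	printf("readahead clamped to %u blocks\n", extlen);	/* prints 32 */
	return 0;
}
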
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  5 */
  6
  7#include <linux/sched.h>
  8#include <linux/slab.h>
  9#include <linux/spinlock.h>
 10#include <linux/completion.h>
 11#include <linux/buffer_head.h>
 12#include <linux/mm.h>
 13#include <linux/pagemap.h>
 14#include <linux/writeback.h>
 15#include <linux/swap.h>
 16#include <linux/delay.h>
 17#include <linux/bio.h>
 18#include <linux/gfs2_ondisk.h>
 19
 20#include "gfs2.h"
 21#include "incore.h"
 22#include "glock.h"
 23#include "glops.h"
 24#include "inode.h"
 25#include "log.h"
 26#include "lops.h"
 27#include "meta_io.h"
 28#include "rgrp.h"
 29#include "trans.h"
 30#include "util.h"
 31#include "trace_gfs2.h"
 32
 33static void gfs2_aspace_write_folio(struct folio *folio,
 34		struct writeback_control *wbc)
 35{
 36	struct buffer_head *bh, *head;
 37	int nr_underway = 0;
 38	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
 39
 40	BUG_ON(!folio_test_locked(folio));
 41
 42	head = folio_buffers(folio);
 43	bh = head;
 44
 45	do {
 46		if (!buffer_mapped(bh))
 47			continue;
 48		/*
 49		 * If it's a fully non-blocking write attempt and we cannot
 50		 * lock the buffer then redirty the page.  Note that this can
 51		 * potentially cause a busy-wait loop from flusher thread and kswapd
 52		 * activity, but those code paths have their own higher-level
 53		 * throttling.
 54		 */
 55		if (wbc->sync_mode != WB_SYNC_NONE) {
 56			lock_buffer(bh);
 57		} else if (!trylock_buffer(bh)) {
 58			folio_redirty_for_writepage(wbc, folio);
 59			continue;
 60		}
 61		if (test_clear_buffer_dirty(bh)) {
 62			mark_buffer_async_write(bh);
 63		} else {
 64			unlock_buffer(bh);
 65		}
 66	} while ((bh = bh->b_this_page) != head);
 67
 68	/*
 69	 * The folio and its buffers are protected from truncation by
 70	 * the writeback flag, so we can drop the bh refcounts early.
 71	 */
 72	BUG_ON(folio_test_writeback(folio));
 73	folio_start_writeback(folio);
 74
 75	do {
 76		struct buffer_head *next = bh->b_this_page;
 77		if (buffer_async_write(bh)) {
 78			submit_bh(REQ_OP_WRITE | write_flags, bh);
 79			nr_underway++;
 80		}
 81		bh = next;
 82	} while (bh != head);
 83	folio_unlock(folio);
 84
 85	if (nr_underway == 0)
 86		folio_end_writeback(folio);
 87}
 88
 89static int gfs2_aspace_writepages(struct address_space *mapping,
 90		struct writeback_control *wbc)
 91{
 92	struct folio *folio = NULL;
 93	int error;
 94
 95	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 96		gfs2_aspace_write_folio(folio, wbc);
 97
 98	return error;
 99}
100
101const struct address_space_operations gfs2_meta_aops = {
102	.dirty_folio	= block_dirty_folio,
103	.invalidate_folio = block_invalidate_folio,
104	.writepages = gfs2_aspace_writepages,
105	.release_folio = gfs2_release_folio,
106};
107
108const struct address_space_operations gfs2_rgrp_aops = {
109	.dirty_folio	= block_dirty_folio,
110	.invalidate_folio = block_invalidate_folio,
111	.writepages = gfs2_aspace_writepages,
112	.release_folio = gfs2_release_folio,
113};
114
115/**
116 * gfs2_getbuf - Get a buffer with a given address space
117 * @gl: the glock
118 * @blkno: the block number (filesystem scope)
119 * @create: 1 if the buffer should be created
120 *
121 * Returns: the buffer
122 */
123
124struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
125{
126	struct address_space *mapping = gfs2_glock2aspace(gl);
127	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
128	struct folio *folio;
129	struct buffer_head *bh;
130	unsigned int shift;
131	unsigned long index;
132	unsigned int bufnum;
133
134	if (mapping == NULL)
135		mapping = &sdp->sd_aspace;
136
137	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
138	index = blkno >> shift;             /* convert block to page */
139	bufnum = blkno - (index << shift);  /* block buf index within page */
140
141	if (create) {
142		folio = __filemap_get_folio(mapping, index,
143				FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
144				mapping_gfp_mask(mapping) | __GFP_NOFAIL);
145		bh = folio_buffers(folio);
146		if (!bh)
147			bh = create_empty_buffers(folio,
148				sdp->sd_sb.sb_bsize, 0);
149	} else {
150		folio = __filemap_get_folio(mapping, index,
151				FGP_LOCK | FGP_ACCESSED, 0);
152		if (IS_ERR(folio))
153			return NULL;
154		bh = folio_buffers(folio);
155	}
156
157	if (!bh)
158		goto out_unlock;
159
160	bh = get_nth_bh(bh, bufnum);
161	if (!buffer_mapped(bh))
162		map_bh(bh, sdp->sd_vfs, blkno);
163
164out_unlock:
165	folio_unlock(folio);
166	folio_put(folio);
167
168	return bh;
169}
170
171static void meta_prep_new(struct buffer_head *bh)
172{
173	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
174
175	lock_buffer(bh);
176	clear_buffer_dirty(bh);
177	set_buffer_uptodate(bh);
178	unlock_buffer(bh);
179
180	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
181}
182
183/**
184 * gfs2_meta_new - Get a block
185 * @gl: The glock associated with this block
186 * @blkno: The block number
187 *
188 * Returns: The buffer
189 */
190
191struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
192{
193	struct buffer_head *bh;
194	bh = gfs2_getbuf(gl, blkno, CREATE);
195	meta_prep_new(bh);
196	return bh;
197}
198
199static void gfs2_meta_read_endio(struct bio *bio)
200{
201	struct bio_vec *bvec;
202	struct bvec_iter_all iter_all;
203
204	bio_for_each_segment_all(bvec, bio, iter_all) {
205		struct page *page = bvec->bv_page;
206		struct buffer_head *bh = page_buffers(page);
207		unsigned int len = bvec->bv_len;
208
209		while (bh_offset(bh) < bvec->bv_offset)
210			bh = bh->b_this_page;
211		do {
212			struct buffer_head *next = bh->b_this_page;
213			len -= bh->b_size;
214			bh->b_end_io(bh, !bio->bi_status);
215			bh = next;
216		} while (bh && len);
217	}
218	bio_put(bio);
219}
220
221/*
222 * Submit several consecutive buffer head I/O requests as a single bio I/O
223 * request.  (See submit_bh_wbc.)
224 */
225static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
226{
227	while (num > 0) {
228		struct buffer_head *bh = *bhs;
229		struct bio *bio;
230
231		bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
232		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
233		while (num > 0) {
234			bh = *bhs;
235			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
236				BUG_ON(bio->bi_iter.bi_size == 0);
237				break;
238			}
239			bhs++;
240			num--;
241		}
242		bio->bi_end_io = gfs2_meta_read_endio;
243		submit_bio(bio);
244	}
245}
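
Unlike the v4.6 version, this gfs2_submit_bhs restarts with a fresh bio whenever bio_add_page refuses another page, so a single call may submit several bios. A userspace simulation of that split-and-continue pattern (bio_add_page is stubbed; the capacity of 2 pages per bio is arbitrary):

#include <stdio.h>

#define BIO_CAP 2		/* pretend a bio holds at most 2 pages (arbitrary) */

static int bio_pages;		/* pages in the current simulated bio */

/* Stub standing in for bio_add_page(): fails when the bio is full. */
static int fake_bio_add_page(int page)
{
	if (bio_pages == BIO_CAP)
		return 0;	/* caller must submit and allocate a new bio */
	bio_pages++;
	printf("  added page %d\n", page);
	return 1;
}

int main(void)
{
	int pages[] = { 10, 11, 12, 13, 14 };
	int num = 5, i = 0;

	while (num > 0) {		/* outer loop: one bio per iteration */
		bio_pages = 0;
		printf("new bio\n");
		while (num > 0) {	/* inner loop: fill until full */
			if (!fake_bio_add_page(pages[i]))
				break;
			i++;
			num--;
		}
		printf("submit bio with %d pages\n", bio_pages);
	}
	return 0;
}
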
246
247/**
248 * gfs2_meta_read - Read a block from disk
249 * @gl: The glock covering the block
250 * @blkno: The block number
251 * @flags: flags
252 * @rahead: Do read-ahead
253 * @bhp: the place where the buffer is returned (NULL on failure)
254 *
255 * Returns: errno
256 */
257
258int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
259		   int rahead, struct buffer_head **bhp)
260{
261	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
262	struct buffer_head *bh, *bhs[2];
263	int num = 0;
264
265	if (gfs2_withdrawing_or_withdrawn(sdp) &&
266	    !gfs2_withdraw_in_prog(sdp)) {
267		*bhp = NULL;
268		return -EIO;
269	}
270
271	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
272
273	lock_buffer(bh);
274	if (buffer_uptodate(bh)) {
275		unlock_buffer(bh);
276		flags &= ~DIO_WAIT;
277	} else {
278		bh->b_end_io = end_buffer_read_sync;
279		get_bh(bh);
280		bhs[num++] = bh;
281	}
282
283	if (rahead) {
284		bh = gfs2_getbuf(gl, blkno + 1, CREATE);
285
286		lock_buffer(bh);
287		if (buffer_uptodate(bh)) {
288			unlock_buffer(bh);
289			brelse(bh);
290		} else {
291			bh->b_end_io = end_buffer_read_sync;
292			bhs[num++] = bh;
293		}
294	}
295
296	gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
297	if (!(flags & DIO_WAIT))
298		return 0;
299
300	bh = *bhp;
301	wait_on_buffer(bh);
302	if (unlikely(!buffer_uptodate(bh))) {
303		struct gfs2_trans *tr = current->journal_info;
304		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
305			gfs2_io_error_bh_wd(sdp, bh);
306		brelse(bh);
307		*bhp = NULL;
308		return -EIO;
309	}
310
311	return 0;
312}
313
314/**
315 * gfs2_meta_wait - Reread a block from disk
316 * @sdp: the filesystem
317 * @bh: The block to wait for
318 *
319 * Returns: errno
320 */
321
322int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
323{
324	if (gfs2_withdrawing_or_withdrawn(sdp) &&
325	    !gfs2_withdraw_in_prog(sdp))
326		return -EIO;
327
328	wait_on_buffer(bh);
329
330	if (!buffer_uptodate(bh)) {
331		struct gfs2_trans *tr = current->journal_info;
332		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
333			gfs2_io_error_bh_wd(sdp, bh);
334		return -EIO;
335	}
336	if (gfs2_withdrawing_or_withdrawn(sdp) &&
337	    !gfs2_withdraw_in_prog(sdp))
338		return -EIO;
339
340	return 0;
341}
342
343void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
344{
345	struct address_space *mapping = bh->b_folio->mapping;
346	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
347	struct gfs2_bufdata *bd = bh->b_private;
348	struct gfs2_trans *tr = current->journal_info;
349	int was_pinned = 0;
350
351	if (test_clear_buffer_pinned(bh)) {
352		trace_gfs2_pin(bd, 0);
353		atomic_dec(&sdp->sd_log_pinned);
354		list_del_init(&bd->bd_list);
355		if (meta == REMOVE_META)
356			tr->tr_num_buf_rm++;
357		else
358			tr->tr_num_databuf_rm++;
359		set_bit(TR_TOUCHED, &tr->tr_flags);
360		was_pinned = 1;
361		brelse(bh);
362	}
363	if (bd) {
364		if (bd->bd_tr) {
365			gfs2_trans_add_revoke(sdp, bd);
366		} else if (was_pinned) {
367			bh->b_private = NULL;
368			kmem_cache_free(gfs2_bufdata_cachep, bd);
369		} else if (!list_empty(&bd->bd_ail_st_list) &&
370					!list_empty(&bd->bd_ail_gl_list)) {
371			gfs2_remove_from_ail(bd);
372		}
373	}
374	clear_buffer_dirty(bh);
375	clear_buffer_uptodate(bh);
376}
377
378/**
379 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
380 * @sdp: superblock
381 * @bstart: starting block address of buffers to remove
382 * @blen: length of buffers to be removed
383 *
384 * This function is called from gfs2_journal wipe, whose job is to remove
385 * buffers, corresponding to deleted blocks, from the journal. If we find any
386 * bufdata elements on the system ail1 list, they haven't been written to
387 * the journal yet. So we remove them.
388 */
389static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
390{
391	struct gfs2_trans *tr, *s;
392	struct gfs2_bufdata *bd, *bs;
393	struct buffer_head *bh;
394	u64 end = bstart + blen;
395
396	gfs2_log_lock(sdp);
397	spin_lock(&sdp->sd_ail_lock);
398	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
399		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
400					 bd_ail_st_list) {
401			bh = bd->bd_bh;
402			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
403				continue;
404
405			gfs2_remove_from_journal(bh, REMOVE_JDATA);
406		}
407	}
408	spin_unlock(&sdp->sd_ail_lock);
409	gfs2_log_unlock(sdp);
410}
411
412static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
413{
414	struct address_space *mapping = ip->i_inode.i_mapping;
415	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
416	struct folio *folio;
417	struct buffer_head *bh;
418	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
419	unsigned long index = blkno >> shift; /* convert block to page */
420	unsigned int bufnum = blkno - (index << shift);
421
422	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
423	if (IS_ERR(folio))
424		return NULL;
425	bh = folio_buffers(folio);
426	if (bh)
427		bh = get_nth_bh(bh, bufnum);
428	folio_unlock(folio);
429	folio_put(folio);
430	return bh;
431}
432
433/**
434 * gfs2_journal_wipe - make inode's buffers so they aren't dirty/pinned anymore
435 * @ip: the inode who owns the buffers
436 * @bstart: the first buffer in the run
437 * @blen: the number of buffers in the run
438 *
439 */
440
441void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
442{
443	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
444	struct buffer_head *bh;
445	int ty;
446
447	if (!ip->i_gl) {
448		/* This can only happen during incomplete inode creation. */
449		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
450		return;
451	}
452
453	gfs2_ail1_wipe(sdp, bstart, blen);
454	while (blen) {
455		ty = REMOVE_META;
456		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
457		if (!bh && gfs2_is_jdata(ip)) {
458			bh = gfs2_getjdatabuf(ip, bstart);
459			ty = REMOVE_JDATA;
460		}
461		if (bh) {
462			lock_buffer(bh);
463			gfs2_log_lock(sdp);
464			spin_lock(&sdp->sd_ail_lock);
465			gfs2_remove_from_journal(bh, ty);
466			spin_unlock(&sdp->sd_ail_lock);
467			gfs2_log_unlock(sdp);
468			unlock_buffer(bh);
469			brelse(bh);
470		}
471
472		bstart++;
473		blen--;
474	}
475}
476
477/**
478 * gfs2_meta_buffer - Get a metadata buffer
479 * @ip: The GFS2 inode
480 * @mtype: The block type (GFS2_METATYPE_*)
481 * @num: The block number (device relative) of the buffer
482 * @bhp: the buffer is returned here
483 *
484 * Returns: errno
485 */
486
487int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
488		     struct buffer_head **bhp)
489{
490	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
491	struct gfs2_glock *gl = ip->i_gl;
492	struct buffer_head *bh;
493	int ret = 0;
494	int rahead = 0;
495
496	if (num == ip->i_no_addr)
497		rahead = ip->i_rahead;
498
499	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
500	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
501		brelse(bh);
502		ret = -EIO;
503	} else {
504		*bhp = bh;
505	}
506	return ret;
507}
508
509/**
510 * gfs2_meta_ra - start readahead on an extent of a file
511 * @gl: the glock the blocks belong to
512 * @dblock: the starting disk block
513 * @extlen: the number of blocks in the extent
514 *
515 * returns: the first buffer in the extent
516 */
517
518struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
519{
520	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
521	struct buffer_head *first_bh, *bh;
522	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
523			  sdp->sd_sb.sb_bsize_shift;
524
525	BUG_ON(!extlen);
526
527	if (max_ra < 1)
528		max_ra = 1;
529	if (extlen > max_ra)
530		extlen = max_ra;
531
532	first_bh = gfs2_getbuf(gl, dblock, CREATE);
533
534	if (buffer_uptodate(first_bh))
535		goto out;
536	bh_read_nowait(first_bh, REQ_META | REQ_PRIO);
537
538	dblock++;
539	extlen--;
540
541	while (extlen) {
542		bh = gfs2_getbuf(gl, dblock, CREATE);
543
544		bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
545		brelse(bh);
546		dblock++;
547		extlen--;
548		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
549			goto out;
550	}
551
552	wait_on_buffer(first_bh);
553out:
554	return first_bh;
555}
556