v6.9.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits.  Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions: this generates the
 * buffer_needs_validate(), set_buffer_needs_validate() and
 * clear_buffer_needs_validate() helpers used below. */
BUFFER_FNS(NeedsValidate, needs_validate);

int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here; non-
	 * journalled writes are only ever done on system files, which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}

/* Caller must provide a bhs[] with either all NULL or all non-NULL
 * entries, so it will be easier to handle read failure.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
	}

read_failure:
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If a middle bh fails, let the previous
				 * bh finish its read and then put it to
				 * avoid a bh leak.
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}
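
/*
 * Illustrative sketch only, not part of io.c: a hypothetical caller of
 * ocfs2_read_blocks_sync() honouring the contract described above.
 * bhs[] starts out all NULL, so this file allocates the heads via
 * sb_getblk(); on success every entry must eventually be released with
 * brelse().  example_read_pair() is a made-up name.
 */
static int example_read_pair(struct ocfs2_super *osb, u64 blkno,
			     struct buffer_head *bhs[2])
{
	int status;

	bhs[0] = bhs[1] = NULL;		/* all-NULL on entry */

	status = ocfs2_read_blocks_sync(osb, blkno, 2, bhs);
	if (status)
		mlog_errno(status);

	/* On success the caller drops both heads with brelse(). */
	return status;
}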

/* Caller must provide a bhs[] with either all NULL or all non-NULL
 * entries, so it will be easier to handle read failure.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, bh);
			continue;
		}
	}

read_failure:
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error, including
				 * those that had already succeeded in
				 * reading.
				 */
				if (new_bh && bh) {
					/* If a middle bh fails, let the
					 * previous bh finish its read and
					 * then put it to avoid a bh leak.
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed. */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate
 * with the journal, so we don't need to lock ip_io_mutex and ci doesn't
 * need to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}
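
/*
 * Illustrative sketch only, not part of io.c: a minimal, hypothetical
 * caller of ocfs2_read_blocks() showing the cached-read path.  A validate
 * callback, when supplied, runs once per block the first time that block
 * is actually read from disk (NeedsValidate is only set on submitted
 * reads), not on cache hits.  example_validate() and example_read_block()
 * are made-up names; real ocfs2 callers pass their own validators (e.g.
 * for inodes or extent blocks).
 */
static int example_validate(struct super_block *sb, struct buffer_head *bh)
{
	/* A real validator would verify checksums/signatures in bh->b_data. */
	return 0;
}

static int example_read_block(struct ocfs2_caching_info *ci, u64 blkno,
			      struct buffer_head **bh_out)
{
	struct buffer_head *bh = NULL;
	int status;

	/* nr == 1, flags == 0: plain cached read with validation. */
	status = ocfs2_read_blocks(ci, blkno, 1, &bh, 0, example_validate);
	if (status) {
		mlog_errno(status);
		return status;
	}

	*bh_out = bh;	/* caller releases with brelse() */
	return 0;
}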