/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);

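/*
 * Write a single buffer head to disk synchronously, bypassing the
 * journal.  On success the block is also marked uptodate in the
 * metadata cache identified by ci.
 */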
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here.
	 * Non-journalled writes are only ever done on system files
	 * which can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}

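/*
 * Read nr blocks starting at block synchronously, without consulting
 * the uptodate cache.  NULL entries in bhs[] are allocated here;
 * buffers that are dirty or owned by the journal are skipped.
 */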
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	}

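	/* Wait for the reads we issued and check the results.  On error,
	 * put the buffer and clear its slot in bhs[]. */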
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			put_bh(bh);
			bhs[i - 1] = NULL;
		}
	}

bail:
	return status;
}

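/*
 * Read nr blocks through the metadata cache for ci.  NULL entries in
 * bhs[] are allocated here.  flags may include OCFS2_BH_IGNORE_CACHE
 * to force a disk read or OCFS2_BH_READAHEAD to start an asynchronous,
 * optimistic read.  The optional validate() callback runs on each
 * buffer this call actually read from disk; read-ahead requests are
 * neither waited on nor validated here.
 */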
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			clear_buffer_uptodate(bh);
			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			continue;
		}
	}

	status = 0;

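	/* Wait for the reads we issued, run the caller's validate()
	 * callback and add each buffer to the uptodate cache.  On error
	 * the remaining buffers are put and their slots cleared.
	 * Read-ahead requests are neither waited on nor validated here. */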
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (status) {
				/* Clear the rest of the buffers on error */
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status) {
					put_bh(bh);
					bhs[i] = NULL;
					continue;
				}
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need
 * to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"
#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);

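/*
 * Write a single buffer head to disk synchronously, bypassing the
 * journal.  On success the block is also marked uptodate in the
 * metadata cache identified by ci.
 */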
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here.
	 * Non-journalled writes are only ever done on system files
	 * which can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	return ret;
}

/* The caller must provide a bhs[] whose entries are either all NULL
 * or all non-NULL, so that read failures are easier to handle.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put the buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
	}

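/* Wait for the reads we issued and check the results.  On error,
 * release any buffers allocated above (caller-provided buffers are
 * only marked not uptodate) before returning. */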
read_failure:
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If a middle bh failed, let the bhs
				 * before it finish their reads and
				 * then put them to avoid a bh leak.
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}

/* The caller must provide a bhs[] whose entries are either all NULL
 * or all non-NULL, so that read failures are easier to handle.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put the buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller can't be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, 0, bh);
			continue;
		}
	}

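/* Wait for the reads we issued, run the caller's validate() callback
 * and add each buffer to the uptodate cache.  On failure, release any
 * buffers allocated above (caller-provided buffers are only marked
 * not uptodate).  Read-ahead requests are neither waited on nor
 * validated here. */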
read_failure:
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error, including
				 * those that had already succeeded in
				 * reading.
				 */
				if (new_bh && bh) {
					/* If a middle bh failed, let the
					 * bhs before it finish their reads
					 * and then put them to avoid a
					 * bh leak.
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:

	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need
 * to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, 0, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}