1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/bio.h>
17#include <linux/fs.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "inode.h"
22#include "glock.h"
23#include "log.h"
24#include "lops.h"
25#include "meta_io.h"
26#include "recovery.h"
27#include "rgrp.h"
28#include "trans.h"
29#include "util.h"
30#include "trace_gfs2.h"
31
32/**
33 * gfs2_pin - Pin a buffer in memory
34 * @sdp: The superblock
35 * @bh: The buffer to be pinned
36 *
37 * The log lock must be held when calling this function
38 */
39static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
40{
41 struct gfs2_bufdata *bd;
42
43 BUG_ON(!current->journal_info);
44
45 clear_buffer_dirty(bh);
46 if (test_set_buffer_pinned(bh))
47 gfs2_assert_withdraw(sdp, 0);
48 if (!buffer_uptodate(bh))
49 gfs2_io_error_bh(sdp, bh);
50 bd = bh->b_private;
51 /* If this buffer is in the AIL and it has already been written
52 * to in-place disk block, remove it from the AIL.
53 */
54 spin_lock(&sdp->sd_ail_lock);
55 if (bd->bd_ail)
56 list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
57 spin_unlock(&sdp->sd_ail_lock);
58 get_bh(bh);
59 atomic_inc(&sdp->sd_log_pinned);
60 trace_gfs2_pin(bd, 1);
61}
62
63/**
64 * gfs2_unpin - Unpin a buffer
65 * @sdp: the filesystem the buffer belongs to
66 * @bh: The buffer to unpin
67 * @ai: The AIL structure the buffer will belong to once unpinned
68 * The buffer is added to the ai_ail1_list of @ai for later in-place writeback.
69 *
70 */
71
72static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
73 struct gfs2_ail *ai)
74{
75 struct gfs2_bufdata *bd = bh->b_private;
76
77 BUG_ON(!buffer_uptodate(bh));
78 BUG_ON(!buffer_pinned(bh));
79
80 lock_buffer(bh);
81 mark_buffer_dirty(bh);
82 clear_buffer_pinned(bh);
83
84 spin_lock(&sdp->sd_ail_lock);
85 if (bd->bd_ail) {
86 list_del(&bd->bd_ail_st_list);
87 brelse(bh);
88 } else {
89 struct gfs2_glock *gl = bd->bd_gl;
90 list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
91 atomic_inc(&gl->gl_ail_count);
92 }
93 bd->bd_ail = ai;
94 list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
95 spin_unlock(&sdp->sd_ail_lock);
96
97 clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
98 trace_gfs2_pin(bd, 0);
99 unlock_buffer(bh);
100 atomic_dec(&sdp->sd_log_pinned);
101}
102
103
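/* Helpers for addressing a log descriptor block: the descriptor header sits
 * at the start of the buffer and the array of __be64 entries it describes
 * follows immediately after it.
 */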
104static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
105{
106 return (struct gfs2_log_descriptor *)bh->b_data;
107}
108
109static inline __be64 *bh_log_ptr(struct buffer_head *bh)
110{
111 struct gfs2_log_descriptor *ld = bh_log_desc(bh);
112 return (__force __be64 *)(ld + 1);
113}
114
115static inline __be64 *bh_ptr_end(struct buffer_head *bh)
116{
117 return (__force __be64 *)(bh->b_data + bh->b_size);
118}
119
120
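/* Allocate a log buffer and initialise a log descriptor header of the given
 * type in it. The length and data fields are filled in later by the caller.
 */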
121static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
122{
123 struct buffer_head *bh = gfs2_log_get_buf(sdp);
124 struct gfs2_log_descriptor *ld = bh_log_desc(bh);
125 ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
126 ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
127 ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
128 ld->ld_type = cpu_to_be32(ld_type);
129 ld->ld_length = 0;
130 ld->ld_data1 = 0;
131 ld->ld_data2 = 0;
132 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
133 return bh;
134}
135
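/* Add a metadata buffer to the current transaction: pin it in memory, stamp
 * the owning journal id into its header and queue it for the next log flush.
 */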
136static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
137{
138 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
139 struct gfs2_meta_header *mh;
140 struct gfs2_trans *tr;
141
142 lock_buffer(bd->bd_bh);
143 gfs2_log_lock(sdp);
144 if (!list_empty(&bd->bd_list_tr))
145 goto out;
146 tr = current->journal_info;
147 tr->tr_touched = 1;
148 tr->tr_num_buf++;
149 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
150 if (!list_empty(&le->le_list))
151 goto out;
152 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
153 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
154 gfs2_meta_check(sdp, bd->bd_bh);
155 gfs2_pin(sdp, bd->bd_bh);
156 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
157 mh->__pad0 = cpu_to_be64(0);
158 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
159 sdp->sd_log_num_buf++;
160 list_add(&le->le_list, &sdp->sd_log_le_buf);
161 tr->tr_num_buf_new++;
162out:
163 gfs2_log_unlock(sdp);
164 unlock_buffer(bd->bd_bh);
165}
166
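/* Write the pinned metadata buffers to the log: each group of up to 'limit'
 * buffers is preceded by a descriptor block listing their block numbers,
 * followed by a copy of each buffer's contents.
 */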
167static void buf_lo_before_commit(struct gfs2_sbd *sdp)
168{
169 struct buffer_head *bh;
170 struct gfs2_log_descriptor *ld;
171 struct gfs2_bufdata *bd1 = NULL, *bd2;
172 unsigned int total;
173 unsigned int limit;
174 unsigned int num;
175 unsigned n;
176 __be64 *ptr;
177
178 limit = buf_limit(sdp);
179 /* for 4k blocks, limit = 503 */
180
181 gfs2_log_lock(sdp);
182 total = sdp->sd_log_num_buf;
183 bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
184 while(total) {
185 num = total;
186 if (total > limit)
187 num = limit;
188 gfs2_log_unlock(sdp);
189 bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
190 gfs2_log_lock(sdp);
191 ld = bh_log_desc(bh);
192 ptr = bh_log_ptr(bh);
193 ld->ld_length = cpu_to_be32(num + 1);
194 ld->ld_data1 = cpu_to_be32(num);
195
196 n = 0;
197 list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
198 bd_le.le_list) {
199 *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
200 if (++n >= num)
201 break;
202 }
203
204 gfs2_log_unlock(sdp);
205 submit_bh(WRITE_SYNC, bh);
206 gfs2_log_lock(sdp);
207
208 n = 0;
209 list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
210 bd_le.le_list) {
211 get_bh(bd2->bd_bh);
212 gfs2_log_unlock(sdp);
213 lock_buffer(bd2->bd_bh);
214 bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
215 submit_bh(WRITE_SYNC, bh);
216 gfs2_log_lock(sdp);
217 if (++n >= num)
218 break;
219 }
220
221 BUG_ON(total < num);
222 total -= num;
223 }
224 gfs2_log_unlock(sdp);
225}
226
227static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
228{
229 struct list_head *head = &sdp->sd_log_le_buf;
230 struct gfs2_bufdata *bd;
231
232 while (!list_empty(head)) {
233 bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
234 list_del_init(&bd->bd_le.le_list);
235 sdp->sd_log_num_buf--;
236
237 gfs2_unpin(sdp, bd->bd_bh, ai);
238 }
239 gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
240}
241
242static void buf_lo_before_scan(struct gfs2_jdesc *jd,
243 struct gfs2_log_header_host *head, int pass)
244{
245 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
246
247 if (pass != 0)
248 return;
249
250 sdp->sd_found_blocks = 0;
251 sdp->sd_replayed_blocks = 0;
252}
253
254static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
255 struct gfs2_log_descriptor *ld, __be64 *ptr,
256 int pass)
257{
258 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
259 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
260 struct gfs2_glock *gl = ip->i_gl;
261 unsigned int blks = be32_to_cpu(ld->ld_data1);
262 struct buffer_head *bh_log, *bh_ip;
263 u64 blkno;
264 int error = 0;
265
266 if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
267 return 0;
268
269 gfs2_replay_incr_blk(sdp, &start);
270
271 for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
272 blkno = be64_to_cpu(*ptr++);
273
274 sdp->sd_found_blocks++;
275
276 if (gfs2_revoke_check(sdp, blkno, start))
277 continue;
278
279 error = gfs2_replay_read_block(jd, start, &bh_log);
280 if (error)
281 return error;
282
283 bh_ip = gfs2_meta_new(gl, blkno);
284 memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
285
286 if (gfs2_meta_check(sdp, bh_ip))
287 error = -EIO;
288 else
289 mark_buffer_dirty(bh_ip);
290
291 brelse(bh_log);
292 brelse(bh_ip);
293
294 if (error)
295 break;
296
297 sdp->sd_replayed_blocks++;
298 }
299
300 return error;
301}
302
303static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
304{
305 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
306 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
307
308 if (error) {
309 gfs2_meta_sync(ip->i_gl);
310 return;
311 }
312 if (pass != 1)
313 return;
314
315 gfs2_meta_sync(ip->i_gl);
316
317 fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
318 jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
319}
320
321static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
322{
323 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
324 struct gfs2_glock *gl = bd->bd_gl;
325 struct gfs2_trans *tr;
326
327 tr = current->journal_info;
328 tr->tr_touched = 1;
329 tr->tr_num_revoke++;
330 sdp->sd_log_num_revoke++;
331 atomic_inc(&gl->gl_revokes);
332 set_bit(GLF_LFLUSH, &gl->gl_flags);
333 list_add(&le->le_list, &sdp->sd_log_le_revoke);
334}
335
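/* Write the queued revokes to the log as a descriptor block followed, when
 * needed, by LB continuation blocks, each packed with __be64 block numbers.
 */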
336static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
337{
338 struct gfs2_log_descriptor *ld;
339 struct gfs2_meta_header *mh;
340 struct buffer_head *bh;
341 unsigned int offset;
342 struct list_head *head = &sdp->sd_log_le_revoke;
343 struct gfs2_bufdata *bd;
344
345 if (!sdp->sd_log_num_revoke)
346 return;
347
348 bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
349 ld = bh_log_desc(bh);
350 ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
351 sizeof(u64)));
352 ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
353 offset = sizeof(struct gfs2_log_descriptor);
354
355 list_for_each_entry(bd, head, bd_le.le_list) {
356 sdp->sd_log_num_revoke--;
357
358 if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
359 submit_bh(WRITE_SYNC, bh);
360
361 bh = gfs2_log_get_buf(sdp);
362 mh = (struct gfs2_meta_header *)bh->b_data;
363 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
364 mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
365 mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
366 offset = sizeof(struct gfs2_meta_header);
367 }
368
369 *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
370 offset += sizeof(u64);
371 }
372 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
373
374 submit_bh(WRITE_SYNC, bh);
375}
376
377static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
378{
379 struct list_head *head = &sdp->sd_log_le_revoke;
380 struct gfs2_bufdata *bd;
381 struct gfs2_glock *gl;
382
383 while (!list_empty(head)) {
384 bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
385 list_del_init(&bd->bd_le.le_list);
386 gl = bd->bd_gl;
387 atomic_dec(&gl->gl_revokes);
388 clear_bit(GLF_LFLUSH, &gl->gl_flags);
389 kmem_cache_free(gfs2_bufdata_cachep, bd);
390 }
391}
392
393static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
394 struct gfs2_log_header_host *head, int pass)
395{
396 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
397
398 if (pass != 0)
399 return;
400
401 sdp->sd_found_revokes = 0;
402 sdp->sd_replay_tail = head->lh_tail;
403}
404
405static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
406 struct gfs2_log_descriptor *ld, __be64 *ptr,
407 int pass)
408{
409 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
410 unsigned int blks = be32_to_cpu(ld->ld_length);
411 unsigned int revokes = be32_to_cpu(ld->ld_data1);
412 struct buffer_head *bh;
413 unsigned int offset;
414 u64 blkno;
415 int first = 1;
416 int error;
417
418 if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
419 return 0;
420
421 offset = sizeof(struct gfs2_log_descriptor);
422
423 for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
424 error = gfs2_replay_read_block(jd, start, &bh);
425 if (error)
426 return error;
427
428 if (!first)
429 gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
430
431 while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
432 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
433
434 error = gfs2_revoke_add(sdp, blkno, start);
435 if (error < 0) {
436 brelse(bh);
437 return error;
438 }
439 else if (error)
440 sdp->sd_found_revokes++;
441
442 if (!--revokes)
443 break;
444 offset += sizeof(u64);
445 }
446
447 brelse(bh);
448 offset = sizeof(struct gfs2_meta_header);
449 first = 0;
450 }
451
452 return 0;
453}
454
455static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
456{
457 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
458
459 if (error) {
460 gfs2_revoke_clean(sdp);
461 return;
462 }
463 if (pass != 1)
464 return;
465
466 fs_info(sdp, "jid=%u: Found %u revoke tags\n",
467 jd->jd_jid, sdp->sd_found_revokes);
468
469 gfs2_revoke_clean(sdp);
470}
471
472static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
473{
474 struct gfs2_rgrpd *rgd;
475 struct gfs2_trans *tr = current->journal_info;
476
477 tr->tr_touched = 1;
478
479 rgd = container_of(le, struct gfs2_rgrpd, rd_le);
480
481 gfs2_log_lock(sdp);
482 if (!list_empty(&le->le_list)){
483 gfs2_log_unlock(sdp);
484 return;
485 }
486 gfs2_rgrp_bh_hold(rgd);
487 sdp->sd_log_num_rg++;
488 list_add(&le->le_list, &sdp->sd_log_le_rg);
489 gfs2_log_unlock(sdp);
490}
491
492static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
493{
494 struct list_head *head = &sdp->sd_log_le_rg;
495 struct gfs2_rgrpd *rgd;
496
497 while (!list_empty(head)) {
498 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
499 list_del_init(&rgd->rd_le.le_list);
500 sdp->sd_log_num_rg--;
501
502 gfs2_rgrp_repolish_clones(rgd);
503 gfs2_rgrp_bh_put(rgd);
504 }
505 gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
506}
507
508/**
509 * databuf_lo_add - Add a databuf to the transaction.
510 *
511 * This is used in two distinct cases:
512 * i) In ordered write mode
513 * We put the data buffer on a list so that we can ensure that its
514 * We put the data buffer on a list so that we can ensure that it is
515 * ii) In journaled data mode
516 * We need to journal the data block in the same way as metadata in
517 * the functions above. The difference is that here we have a tag
518 * which is two __be64's being the block number (as per meta data)
519 * and a flag which says whether the data block needs escaping or
520 * not. This means we need a new log entry for each 251 or so data
521 * blocks, which isn't an enormous overhead but twice as much as
522 * for normal metadata blocks.
523 */
524static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
525{
526 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
527 struct gfs2_trans *tr = current->journal_info;
528 struct address_space *mapping = bd->bd_bh->b_page->mapping;
529 struct gfs2_inode *ip = GFS2_I(mapping->host);
530
531 lock_buffer(bd->bd_bh);
532 gfs2_log_lock(sdp);
533 if (tr) {
534 if (!list_empty(&bd->bd_list_tr))
535 goto out;
536 tr->tr_touched = 1;
537 if (gfs2_is_jdata(ip)) {
538 tr->tr_num_buf++;
539 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
540 }
541 }
542 if (!list_empty(&le->le_list))
543 goto out;
544
545 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
546 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
547 if (gfs2_is_jdata(ip)) {
548 gfs2_pin(sdp, bd->bd_bh);
549 tr->tr_num_databuf_new++;
550 sdp->sd_log_num_databuf++;
551 list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
552 } else {
553 list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
554 }
555out:
556 gfs2_log_unlock(sdp);
557 unlock_buffer(bd->bd_bh);
558}
559
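/* A journaled data block that happens to begin with GFS2_MAGIC must be
 * escaped (its first word zeroed in the log copy) so that log replay does
 * not mistake it for metadata; flag such buffers here.
 */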
560static void gfs2_check_magic(struct buffer_head *bh)
561{
562 void *kaddr;
563 __be32 *ptr;
564
565 clear_buffer_escaped(bh);
566 kaddr = kmap_atomic(bh->b_page, KM_USER0);
567 ptr = kaddr + bh_offset(bh);
568 if (*ptr == cpu_to_be32(GFS2_MAGIC))
569 set_buffer_escaped(bh);
570 kunmap_atomic(kaddr, KM_USER0);
571}
572
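/* Write a previously prepared descriptor buffer followed by the data buffers
 * on 'list', moving each to 'done' as it is submitted. Escaped buffers are
 * copied into a scratch log buffer with their first word zeroed.
 */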
573static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
574 struct list_head *list, struct list_head *done,
575 unsigned int n)
576{
577 struct buffer_head *bh1;
578 struct gfs2_log_descriptor *ld;
579 struct gfs2_bufdata *bd;
580 __be64 *ptr;
581
582 if (!bh)
583 return;
584
585 ld = bh_log_desc(bh);
586 ld->ld_length = cpu_to_be32(n + 1);
587 ld->ld_data1 = cpu_to_be32(n);
588
589 ptr = bh_log_ptr(bh);
590
591 get_bh(bh);
592 submit_bh(WRITE_SYNC, bh);
593 gfs2_log_lock(sdp);
594 while(!list_empty(list)) {
595 bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
596 list_move_tail(&bd->bd_le.le_list, done);
597 get_bh(bd->bd_bh);
598 while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
599 gfs2_log_incr_head(sdp);
600 ptr += 2;
601 }
602 gfs2_log_unlock(sdp);
603 lock_buffer(bd->bd_bh);
604 if (buffer_escaped(bd->bd_bh)) {
605 void *kaddr;
606 bh1 = gfs2_log_get_buf(sdp);
607 kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
608 memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
609 bh1->b_size);
610 kunmap_atomic(kaddr, KM_USER0);
611 *(__be32 *)bh1->b_data = 0;
612 clear_buffer_escaped(bd->bd_bh);
613 unlock_buffer(bd->bd_bh);
614 brelse(bd->bd_bh);
615 } else {
616 bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
617 }
618 submit_bh(WRITE_SYNC, bh1);
619 gfs2_log_lock(sdp);
620 ptr += 2;
621 }
622 gfs2_log_unlock(sdp);
623 brelse(bh);
624}
625
626/**
627 * databuf_lo_before_commit - Scan the data buffers, writing as we go
628 *
629 */
630
631static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
632{
633 struct gfs2_bufdata *bd = NULL;
634 struct buffer_head *bh = NULL;
635 unsigned int n = 0;
636 __be64 *ptr = NULL, *end = NULL;
637 LIST_HEAD(processed);
638 LIST_HEAD(in_progress);
639
640 gfs2_log_lock(sdp);
641 while (!list_empty(&sdp->sd_log_le_databuf)) {
642 if (ptr == end) {
643 gfs2_log_unlock(sdp);
644 gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
645 n = 0;
646 bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
647 ptr = bh_log_ptr(bh);
648 end = bh_ptr_end(bh) - 1;
649 gfs2_log_lock(sdp);
650 continue;
651 }
652 bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
653 list_move_tail(&bd->bd_le.le_list, &in_progress);
654 gfs2_check_magic(bd->bd_bh);
655 *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
656		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
657 n++;
658 }
659 gfs2_log_unlock(sdp);
660 gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
661 gfs2_log_lock(sdp);
662 list_splice(&processed, &sdp->sd_log_le_databuf);
663 gfs2_log_unlock(sdp);
664}
665
666static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
667 struct gfs2_log_descriptor *ld,
668 __be64 *ptr, int pass)
669{
670 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
671 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
672 struct gfs2_glock *gl = ip->i_gl;
673 unsigned int blks = be32_to_cpu(ld->ld_data1);
674 struct buffer_head *bh_log, *bh_ip;
675 u64 blkno;
676 u64 esc;
677 int error = 0;
678
679 if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
680 return 0;
681
682 gfs2_replay_incr_blk(sdp, &start);
683 for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
684 blkno = be64_to_cpu(*ptr++);
685 esc = be64_to_cpu(*ptr++);
686
687 sdp->sd_found_blocks++;
688
689 if (gfs2_revoke_check(sdp, blkno, start))
690 continue;
691
692 error = gfs2_replay_read_block(jd, start, &bh_log);
693 if (error)
694 return error;
695
696 bh_ip = gfs2_meta_new(gl, blkno);
697 memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
698
699 /* Unescape */
700 if (esc) {
701 __be32 *eptr = (__be32 *)bh_ip->b_data;
702 *eptr = cpu_to_be32(GFS2_MAGIC);
703 }
704 mark_buffer_dirty(bh_ip);
705
706 brelse(bh_log);
707 brelse(bh_ip);
708 if (error)
709 break;
710
711 sdp->sd_replayed_blocks++;
712 }
713
714 return error;
715}
716
717/* FIXME: sort out accounting for log blocks etc. */
718
719static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
720{
721 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
722 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
723
724 if (error) {
725 gfs2_meta_sync(ip->i_gl);
726 return;
727 }
728 if (pass != 1)
729 return;
730
731 /* data sync? */
732 gfs2_meta_sync(ip->i_gl);
733
734 fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
735 jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
736}
737
738static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
739{
740 struct list_head *head = &sdp->sd_log_le_databuf;
741 struct gfs2_bufdata *bd;
742
743 while (!list_empty(head)) {
744 bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
745 list_del_init(&bd->bd_le.le_list);
746 sdp->sd_log_num_databuf--;
747 gfs2_unpin(sdp, bd->bd_bh, ai);
748 }
749 gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
750}
751
752
753const struct gfs2_log_operations gfs2_buf_lops = {
754 .lo_add = buf_lo_add,
755 .lo_before_commit = buf_lo_before_commit,
756 .lo_after_commit = buf_lo_after_commit,
757 .lo_before_scan = buf_lo_before_scan,
758 .lo_scan_elements = buf_lo_scan_elements,
759 .lo_after_scan = buf_lo_after_scan,
760 .lo_name = "buf",
761};
762
763const struct gfs2_log_operations gfs2_revoke_lops = {
764 .lo_add = revoke_lo_add,
765 .lo_before_commit = revoke_lo_before_commit,
766 .lo_after_commit = revoke_lo_after_commit,
767 .lo_before_scan = revoke_lo_before_scan,
768 .lo_scan_elements = revoke_lo_scan_elements,
769 .lo_after_scan = revoke_lo_after_scan,
770 .lo_name = "revoke",
771};
772
773const struct gfs2_log_operations gfs2_rg_lops = {
774 .lo_add = rg_lo_add,
775 .lo_after_commit = rg_lo_after_commit,
776 .lo_name = "rg",
777};
778
779const struct gfs2_log_operations gfs2_databuf_lops = {
780 .lo_add = databuf_lo_add,
781 .lo_before_commit = databuf_lo_before_commit,
782 .lo_after_commit = databuf_lo_after_commit,
783 .lo_scan_elements = databuf_lo_scan_elements,
784 .lo_after_scan = databuf_lo_after_scan,
785 .lo_name = "databuf",
786};
787
788const struct gfs2_log_operations *gfs2_log_ops[] = {
789 &gfs2_databuf_lops,
790 &gfs2_buf_lops,
791 &gfs2_rg_lops,
792 &gfs2_revoke_lops,
793 NULL,
794};
795
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 */
6
7#include <linux/sched.h>
8#include <linux/slab.h>
9#include <linux/spinlock.h>
10#include <linux/completion.h>
11#include <linux/buffer_head.h>
12#include <linux/mempool.h>
13#include <linux/gfs2_ondisk.h>
14#include <linux/bio.h>
15#include <linux/fs.h>
16#include <linux/list_sort.h>
17#include <linux/blkdev.h>
18
19#include "bmap.h"
20#include "dir.h"
21#include "gfs2.h"
22#include "incore.h"
23#include "inode.h"
24#include "glock.h"
25#include "log.h"
26#include "lops.h"
27#include "meta_io.h"
28#include "recovery.h"
29#include "rgrp.h"
30#include "trans.h"
31#include "util.h"
32#include "trace_gfs2.h"
33
34/**
35 * gfs2_pin - Pin a buffer in memory
36 * @sdp: The superblock
37 * @bh: The buffer to be pinned
38 *
39 * The log lock must be held when calling this function
40 */
41void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
42{
43 struct gfs2_bufdata *bd;
44
45 BUG_ON(!current->journal_info);
46
47 clear_buffer_dirty(bh);
48 if (test_set_buffer_pinned(bh))
49 gfs2_assert_withdraw(sdp, 0);
50 if (!buffer_uptodate(bh))
51 gfs2_io_error_bh_wd(sdp, bh);
52 bd = bh->b_private;
53 /* If this buffer is in the AIL and it has already been written
54 * to in-place disk block, remove it from the AIL.
55 */
56 spin_lock(&sdp->sd_ail_lock);
57 if (bd->bd_tr)
58 list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
59 spin_unlock(&sdp->sd_ail_lock);
60 get_bh(bh);
61 atomic_inc(&sdp->sd_log_pinned);
62 trace_gfs2_pin(bd, 1);
63}
64
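/* Once a resource group bitmap block has been written to the log, its clone
 * bitmap can be resynchronised with the real bitmap and, if the filesystem
 * is mounted with discard enabled, discard requests sent for freed blocks.
 */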
65static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
66{
67 return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
68}
69
70static void maybe_release_space(struct gfs2_bufdata *bd)
71{
72 struct gfs2_glock *gl = bd->bd_gl;
73 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
74 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
75 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
76 struct gfs2_bitmap *bi = rgd->rd_bits + index;
77
78 if (bi->bi_clone == NULL)
79 return;
80 if (sdp->sd_args.ar_discard)
81 gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
82 memcpy(bi->bi_clone + bi->bi_offset,
83 bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
84 clear_bit(GBF_FULL, &bi->bi_flags);
85 rgd->rd_free_clone = rgd->rd_free;
86 rgd->rd_extfail_pt = rgd->rd_free;
87}
88
89/**
90 * gfs2_unpin - Unpin a buffer
91 * @sdp: the filesystem the buffer belongs to
92 * @bh: The buffer to unpin
93 * @tr: The system transaction the buffer belongs to once unpinned
94 * The buffer is added to the AIL1 list of @tr for later in-place writeback.
95 *
96 */
97
98static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
99 struct gfs2_trans *tr)
100{
101 struct gfs2_bufdata *bd = bh->b_private;
102
103 BUG_ON(!buffer_uptodate(bh));
104 BUG_ON(!buffer_pinned(bh));
105
106 lock_buffer(bh);
107 mark_buffer_dirty(bh);
108 clear_buffer_pinned(bh);
109
110 if (buffer_is_rgrp(bd))
111 maybe_release_space(bd);
112
113 spin_lock(&sdp->sd_ail_lock);
114 if (bd->bd_tr) {
115 list_del(&bd->bd_ail_st_list);
116 brelse(bh);
117 } else {
118 struct gfs2_glock *gl = bd->bd_gl;
119 list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
120 atomic_inc(&gl->gl_ail_count);
121 }
122 bd->bd_tr = tr;
123 list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
124 spin_unlock(&sdp->sd_ail_lock);
125
126 clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
127 trace_gfs2_pin(bd, 0);
128 unlock_buffer(bh);
129 atomic_dec(&sdp->sd_log_pinned);
130}
131
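/* Advance the log flush head by one block, wrapping at the end of the
 * journal. The BUG_ON guards against the flush head overtaking the log tail.
 */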
132void gfs2_log_incr_head(struct gfs2_sbd *sdp)
133{
134 BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
135 (sdp->sd_log_flush_head != sdp->sd_log_head));
136
137 if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
138 sdp->sd_log_flush_head = 0;
139}
140
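/* Map a journal-relative block number to its physical device block using the
 * journal's extent list. Returns -1 if the block lies outside every extent.
 */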
141u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
142{
143 struct gfs2_journal_extent *je;
144
145 list_for_each_entry(je, &jd->extent_list, list) {
146 if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
147 return je->dblock + lblock - je->lblock;
148 }
149
150 return -1;
151}
152
153/**
154 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
155 * @sdp: The superblock
156 * @bvec: The bio_vec
157 * @error: The i/o status
158 *
159 * This finds the relevant buffers and unlocks them and sets the
160 * error flag according to the status of the i/o request. This is
161 * used when the log is writing data which has an in-place version
162 * that is pinned in the pagecache.
163 */
164
165static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
166 struct bio_vec *bvec,
167 blk_status_t error)
168{
169 struct buffer_head *bh, *next;
170 struct page *page = bvec->bv_page;
171 unsigned size;
172
173 bh = page_buffers(page);
174 size = bvec->bv_len;
175 while (bh_offset(bh) < bvec->bv_offset)
176 bh = bh->b_this_page;
177 do {
178 if (error)
179 mark_buffer_write_io_error(bh);
180 unlock_buffer(bh);
181 next = bh->b_this_page;
182 size -= bh->b_size;
183 brelse(bh);
184 bh = next;
185 } while(bh && size);
186}
187
188/**
189 * gfs2_end_log_write - end of i/o to the log
190 * @bio: The bio
191 *
192 * Each bio_vec contains either data from the pagecache or data
193 * relating to the log itself. Here we iterate over the bio_vec
194 * array, processing both kinds of data.
195 *
196 */
197
198static void gfs2_end_log_write(struct bio *bio)
199{
200 struct gfs2_sbd *sdp = bio->bi_private;
201 struct bio_vec *bvec;
202 struct page *page;
203 struct bvec_iter_all iter_all;
204
205 if (bio->bi_status) {
206 if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
207 fs_err(sdp, "Error %d writing to journal, jid=%u\n",
208 bio->bi_status, sdp->sd_jdesc->jd_jid);
209 gfs2_withdraw_delayed(sdp);
210 /* prevent more writes to the journal */
211 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
212 wake_up(&sdp->sd_logd_waitq);
213 }
214
215 bio_for_each_segment_all(bvec, bio, iter_all) {
216 page = bvec->bv_page;
217 if (page_has_buffers(page))
218 gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
219 else
220 mempool_free(page, gfs2_page_pool);
221 }
222
223 bio_put(bio);
224 if (atomic_dec_and_test(&sdp->sd_log_in_flight))
225 wake_up(&sdp->sd_log_flush_wait);
226}
227
228/**
229 * gfs2_log_submit_bio - Submit any pending log bio
230 * @biop: Address of the bio pointer
231 * @opf: REQ_OP | op_flags
232 *
233 * Submit any pending part-built or full bio to the block device. If
234 * there is no pending bio, then this is a no-op.
235 */
236
237void gfs2_log_submit_bio(struct bio **biop, int opf)
238{
239 struct bio *bio = *biop;
240 if (bio) {
241 struct gfs2_sbd *sdp = bio->bi_private;
242 atomic_inc(&sdp->sd_log_in_flight);
243 bio->bi_opf = opf;
244 submit_bio(bio);
245 *biop = NULL;
246 }
247}
248
249/**
250 * gfs2_log_alloc_bio - Allocate a bio
251 * @sdp: The super block
252 * @blkno: The device block number we want to write to
253 * @end_io: The bi_end_io callback
254 *
255 * Allocate a new bio, initialize it with the given parameters and return it.
256 *
257 * Returns: The newly allocated bio
258 */
259
260static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
261 bio_end_io_t *end_io)
262{
263 struct super_block *sb = sdp->sd_vfs;
264 struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
265
266 bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
267 bio_set_dev(bio, sb->s_bdev);
268 bio->bi_end_io = end_io;
269 bio->bi_private = sdp;
270
271 return bio;
272}
273
274/**
275 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
276 * @sdp: The super block
277 * @blkno: The device block number we want to write to
278 * @biop: Address of the bio pointer to get or allocate
279 * @op: REQ_OP
280 * @end_io: The bi_end_io callback
281 * @flush: Always flush the current bio and allocate a new one?
282 *
283 * If there is a cached bio, then if the next block number is sequential
284 * with the previous one, return it, otherwise flush the bio to the
285 * device. If there is no cached bio, or we just flushed it, then
286 * allocate a new one.
287 *
288 * Returns: The bio to use for log writes
289 */
290
291static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
292 struct bio **biop, int op,
293 bio_end_io_t *end_io, bool flush)
294{
295 struct bio *bio = *biop;
296
297 if (bio) {
298 u64 nblk;
299
300 nblk = bio_end_sector(bio);
301 nblk >>= sdp->sd_fsb2bb_shift;
302 if (blkno == nblk && !flush)
303 return bio;
304 gfs2_log_submit_bio(biop, op);
305 }
306
307 *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
308 return *biop;
309}
310
311/**
312 * gfs2_log_write - write to log
313 * @sdp: the filesystem
314 * @page: the page to write
315 * @size: the size of the data to write
316 * @offset: the offset within the page
317 * @blkno: block number of the log entry
318 *
319 * Try and add the page segment to the current bio. If that fails,
320 * submit the current bio to the device and create a new one, and
321 * then add the page segment to that.
322 */
323
324void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
325 unsigned size, unsigned offset, u64 blkno)
326{
327 struct bio *bio;
328 int ret;
329
330 bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
331 gfs2_end_log_write, false);
332 ret = bio_add_page(bio, page, size, offset);
333 if (ret == 0) {
334 bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
335 REQ_OP_WRITE, gfs2_end_log_write, true);
336 ret = bio_add_page(bio, page, size, offset);
337 WARN_ON(ret == 0);
338 }
339}
340
341/**
342 * gfs2_log_write_bh - write a buffer's content to the log
343 * @sdp: The super block
344 * @bh: The buffer pointing to the in-place location
345 *
346 * This writes the content of the buffer to the next available location
347 * in the log. The buffer will be unlocked once the i/o to the log has
348 * completed.
349 */
350
351static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
352{
353 u64 dblock;
354
355 dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
356 gfs2_log_incr_head(sdp);
357 gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
358}
359
360/**
361 * gfs2_log_write_page - write one block stored in a page, into the log
362 * @sdp: The superblock
363 * @page: The struct page
364 *
365 * This writes the first block-sized part of the page into the log. Note
366 * that the page must have been allocated from the gfs2_page_pool mempool
367 * and that after this has been called, ownership has been transferred and
368 * the page may be freed at any time.
369 */
370
371void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
372{
373 struct super_block *sb = sdp->sd_vfs;
374 u64 dblock;
375
376 dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
377 gfs2_log_incr_head(sdp);
378 gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
379}
380
381/**
382 * gfs2_end_log_read - end I/O callback for reads from the log
383 * @bio: The bio
384 *
385 * Simply unlock the pages in the bio. The main thread will wait on them and
386 * process them in order as necessary.
387 */
388
389static void gfs2_end_log_read(struct bio *bio)
390{
391 struct page *page;
392 struct bio_vec *bvec;
393 struct bvec_iter_all iter_all;
394
395 bio_for_each_segment_all(bvec, bio, iter_all) {
396 page = bvec->bv_page;
397 if (bio->bi_status) {
398 int err = blk_status_to_errno(bio->bi_status);
399
400 SetPageError(page);
401 mapping_set_error(page->mapping, err);
402 }
403 unlock_page(page);
404 }
405
406 bio_put(bio);
407}
408
409/**
410 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
411 * @jd: The journal descriptor
412 * @head: The highest-sequence log header seen so far, updated in place
413 * @page: The page to look in
414 * Returns: true if the journal head has been found, false otherwise.
415 */
416
417static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
418 struct gfs2_log_header_host *head,
419 struct page *page)
420{
421 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
422 struct gfs2_log_header_host lh;
423 void *kaddr = kmap_atomic(page);
424 unsigned int offset;
425 bool ret = false;
426
427 for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
428 if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
429 if (lh.lh_sequence >= head->lh_sequence)
430 *head = lh;
431 else {
432 ret = true;
433 break;
434 }
435 }
436 }
437 kunmap_atomic(kaddr);
438 return ret;
439}
440
441/**
442 * gfs2_jhead_process_page - Search/cleanup a page
443 * @jd: The journal descriptor
444 * @index: Index of the page to look into
445 * @head: The journal head to fill in
446 * @done: If set, perform only cleanup, else search and set if found.
447 * Find the page with 'index' in the journal's mapping. Search the page for
448 * the journal head if requested (when *done is false). Release refs on the
449 * page so the page cache can reclaim it (put_page() twice). We grabbed a
450 * reference on this page two times, first when we did a find_or_create_page()
451 * to obtain the page to add it to the bio and second when we do a
452 * find_get_page() here to get the page to wait on while I/O on it is being
453 * completed.
454 * This function is also used to free up a page we might've grabbed but not
455 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
456 * submitted the I/O, but we already found the jhead so we only need to drop
457 * our references to the page.
458 */
459
460static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
461 struct gfs2_log_header_host *head,
462 bool *done)
463{
464 struct page *page;
465
466 page = find_get_page(jd->jd_inode->i_mapping, index);
467 wait_on_page_locked(page);
468
469 if (PageError(page))
470 *done = true;
471
472 if (!*done)
473 *done = gfs2_jhead_pg_srch(jd, head, page);
474
475 put_page(page); /* Once for find_get_page */
476 put_page(page); /* Once more for find_or_create_page */
477}
478
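/* Start a new bio that continues from the end sector of 'prev', chained to
 * 'prev' so that their completions are linked, and submit 'prev'.
 */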
479static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
480{
481 struct bio *new;
482
483 new = bio_alloc(GFP_NOIO, nr_iovecs);
484 bio_copy_dev(new, prev);
485 new->bi_iter.bi_sector = bio_end_sector(prev);
486 new->bi_opf = prev->bi_opf;
487 new->bi_write_hint = prev->bi_write_hint;
488 bio_chain(new, prev);
489 submit_bio(prev);
490 return new;
491}
492
493/**
494 * gfs2_find_jhead - find the head of a log
495 * @jd: The journal descriptor
496 * @head: The log descriptor for the head of the log is returned here
497 *
498 * Do a search of a journal by reading it in large chunks using bios and find
499 * the valid log entry with the highest sequence number. (i.e. the log head)
500 *
501 * Returns: 0 on success, errno otherwise
502 */
503int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
504 bool keep_cache)
505{
506 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
507 struct address_space *mapping = jd->jd_inode->i_mapping;
508 unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
509 unsigned int bsize = sdp->sd_sb.sb_bsize, off;
510 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
511 unsigned int shift = PAGE_SHIFT - bsize_shift;
512 unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
513 struct gfs2_journal_extent *je;
514 int sz, ret = 0;
515 struct bio *bio = NULL;
516 struct page *page = NULL;
517 bool done = false;
518 errseq_t since;
519
520 memset(head, 0, sizeof(*head));
521 if (list_empty(&jd->extent_list))
522 gfs2_map_journal_extents(sdp, jd);
523
524 since = filemap_sample_wb_err(mapping);
525 list_for_each_entry(je, &jd->extent_list, list) {
526 u64 dblock = je->dblock;
527
528 for (; block < je->lblock + je->blocks; block++, dblock++) {
529 if (!page) {
530 page = find_or_create_page(mapping,
531 block >> shift, GFP_NOFS);
532 if (!page) {
533 ret = -ENOMEM;
534 done = true;
535 goto out;
536 }
537 off = 0;
538 }
539
540 if (bio && (off || block < blocks_submitted + max_blocks)) {
541 sector_t sector = dblock << sdp->sd_fsb2bb_shift;
542
543 if (bio_end_sector(bio) == sector) {
544 sz = bio_add_page(bio, page, bsize, off);
545 if (sz == bsize)
546 goto block_added;
547 }
548 if (off) {
549 unsigned int blocks =
550 (PAGE_SIZE - off) >> bsize_shift;
551
552 bio = gfs2_chain_bio(bio, blocks);
553 goto add_block_to_new_bio;
554 }
555 }
556
557 if (bio) {
558 blocks_submitted = block;
559 submit_bio(bio);
560 }
561
562 bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
563 bio->bi_opf = REQ_OP_READ;
564add_block_to_new_bio:
565 sz = bio_add_page(bio, page, bsize, off);
566 BUG_ON(sz != bsize);
567block_added:
568 off += bsize;
569 if (off == PAGE_SIZE)
570 page = NULL;
571 if (blocks_submitted <= blocks_read + max_blocks) {
572 /* Keep at least one bio in flight */
573 continue;
574 }
575
576 gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
577 blocks_read += PAGE_SIZE >> bsize_shift;
578 if (done)
579 goto out; /* found */
580 }
581 }
582
583out:
584 if (bio)
585 submit_bio(bio);
586 while (blocks_read < block) {
587 gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
588 blocks_read += PAGE_SIZE >> bsize_shift;
589 }
590
591 if (!ret)
592 ret = filemap_check_wb_err(mapping, since);
593
594 if (!keep_cache)
595 truncate_inode_pages(mapping, 0);
596
597 return ret;
598}
599
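/* Allocate a page from the log mempool and initialise a log descriptor of
 * the given type, length and data1 value at the start of it.
 */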
600static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
601 u32 ld_length, u32 ld_data1)
602{
603 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
604 struct gfs2_log_descriptor *ld = page_address(page);
605 clear_page(ld);
606 ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
607 ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
608 ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
609 ld->ld_type = cpu_to_be32(ld_type);
610 ld->ld_length = cpu_to_be32(ld_length);
611 ld->ld_data1 = cpu_to_be32(ld_data1);
612 ld->ld_data2 = 0;
613 return page;
614}
615
616static void gfs2_check_magic(struct buffer_head *bh)
617{
618 void *kaddr;
619 __be32 *ptr;
620
621 clear_buffer_escaped(bh);
622 kaddr = kmap_atomic(bh->b_page);
623 ptr = kaddr + bh_offset(bh);
624 if (*ptr == cpu_to_be32(GFS2_MAGIC))
625 set_buffer_escaped(bh);
626 kunmap_atomic(kaddr);
627}
628
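/* list_sort() comparison callback: order bufdata entries by block number so
 * that log writes are issued in ascending disk order.
 */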
629static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
630{
631 struct gfs2_bufdata *bda, *bdb;
632
633 bda = list_entry(a, struct gfs2_bufdata, bd_list);
634 bdb = list_entry(b, struct gfs2_bufdata, bd_list);
635
636 if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
637 return -1;
638 if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
639 return 1;
640 return 0;
641}
642
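/* Common before_commit for metadata and journaled data buffers: sort the
 * list by block number, then write groups of up to 'limit' blocks, each
 * preceded by a descriptor page listing the block numbers (and, for jdata,
 * an escape flag per block).
 */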
643static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
644 unsigned int total, struct list_head *blist,
645 bool is_databuf)
646{
647 struct gfs2_log_descriptor *ld;
648 struct gfs2_bufdata *bd1 = NULL, *bd2;
649 struct page *page;
650 unsigned int num;
651 unsigned n;
652 __be64 *ptr;
653
654 gfs2_log_lock(sdp);
655 list_sort(NULL, blist, blocknr_cmp);
656 bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
657 while(total) {
658 num = total;
659 if (total > limit)
660 num = limit;
661 gfs2_log_unlock(sdp);
662 page = gfs2_get_log_desc(sdp,
663 is_databuf ? GFS2_LOG_DESC_JDATA :
664 GFS2_LOG_DESC_METADATA, num + 1, num);
665 ld = page_address(page);
666 gfs2_log_lock(sdp);
667 ptr = (__be64 *)(ld + 1);
668
669 n = 0;
670 list_for_each_entry_continue(bd1, blist, bd_list) {
671 *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
672 if (is_databuf) {
673 gfs2_check_magic(bd1->bd_bh);
674 *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
675 }
676 if (++n >= num)
677 break;
678 }
679
680 gfs2_log_unlock(sdp);
681 gfs2_log_write_page(sdp, page);
682 gfs2_log_lock(sdp);
683
684 n = 0;
685 list_for_each_entry_continue(bd2, blist, bd_list) {
686 get_bh(bd2->bd_bh);
687 gfs2_log_unlock(sdp);
688 lock_buffer(bd2->bd_bh);
689
690 if (buffer_escaped(bd2->bd_bh)) {
691 void *kaddr;
692 page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
693 ptr = page_address(page);
694 kaddr = kmap_atomic(bd2->bd_bh->b_page);
695 memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
696 bd2->bd_bh->b_size);
697 kunmap_atomic(kaddr);
698 *(__be32 *)ptr = 0;
699 clear_buffer_escaped(bd2->bd_bh);
700 unlock_buffer(bd2->bd_bh);
701 brelse(bd2->bd_bh);
702 gfs2_log_write_page(sdp, page);
703 } else {
704 gfs2_log_write_bh(sdp, bd2->bd_bh);
705 }
706 gfs2_log_lock(sdp);
707 if (++n >= num)
708 break;
709 }
710
711 BUG_ON(total < num);
712 total -= num;
713 }
714 gfs2_log_unlock(sdp);
715}
716
717static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
718{
719 unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
720 unsigned int nbuf;
721 if (tr == NULL)
722 return;
723 nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
724 gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
725}
726
727static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
728{
729 struct list_head *head;
730 struct gfs2_bufdata *bd;
731
732 if (tr == NULL)
733 return;
734
735 head = &tr->tr_buf;
736 while (!list_empty(head)) {
737 bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
738 list_del_init(&bd->bd_list);
739 gfs2_unpin(sdp, bd->bd_bh, tr);
740 }
741}
742
743static void buf_lo_before_scan(struct gfs2_jdesc *jd,
744 struct gfs2_log_header_host *head, int pass)
745{
746 if (pass != 0)
747 return;
748
749 jd->jd_found_blocks = 0;
750 jd->jd_replayed_blocks = 0;
751}
752
753static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
754 struct gfs2_log_descriptor *ld, __be64 *ptr,
755 int pass)
756{
757 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
758 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
759 struct gfs2_glock *gl = ip->i_gl;
760 unsigned int blks = be32_to_cpu(ld->ld_data1);
761 struct buffer_head *bh_log, *bh_ip;
762 u64 blkno;
763 int error = 0;
764
765 if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
766 return 0;
767
768 gfs2_replay_incr_blk(jd, &start);
769
770 for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
771 blkno = be64_to_cpu(*ptr++);
772
773 jd->jd_found_blocks++;
774
775 if (gfs2_revoke_check(jd, blkno, start))
776 continue;
777
778 error = gfs2_replay_read_block(jd, start, &bh_log);
779 if (error)
780 return error;
781
782 bh_ip = gfs2_meta_new(gl, blkno);
783 memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
784
785 if (gfs2_meta_check(sdp, bh_ip))
786 error = -EIO;
787 else {
788 struct gfs2_meta_header *mh =
789 (struct gfs2_meta_header *)bh_ip->b_data;
790
791 if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
792 struct gfs2_rgrpd *rgd;
793
794 rgd = gfs2_blk2rgrpd(sdp, blkno, false);
795 if (rgd && rgd->rd_addr == blkno &&
796 rgd->rd_bits && rgd->rd_bits->bi_bh) {
797 fs_info(sdp, "Replaying 0x%llx but we "
798 "already have a bh!\n",
799 (unsigned long long)blkno);
800 fs_info(sdp, "busy:%d, pinned:%d\n",
801 buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
802 buffer_pinned(rgd->rd_bits->bi_bh));
803 gfs2_dump_glock(NULL, rgd->rd_gl, true);
804 }
805 }
806 mark_buffer_dirty(bh_ip);
807 }
808 brelse(bh_log);
809 brelse(bh_ip);
810
811 if (error)
812 break;
813
814 jd->jd_replayed_blocks++;
815 }
816
817 return error;
818}
819
820/**
821 * gfs2_meta_sync - Sync all buffers associated with a glock
822 * @gl: The glock
823 *
824 */
825
826static void gfs2_meta_sync(struct gfs2_glock *gl)
827{
828 struct address_space *mapping = gfs2_glock2aspace(gl);
829 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
830 int error;
831
832 if (mapping == NULL)
833 mapping = &sdp->sd_aspace;
834
835 filemap_fdatawrite(mapping);
836 error = filemap_fdatawait(mapping);
837
838 if (error)
839 gfs2_io_error(gl->gl_name.ln_sbd);
840}
841
842static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
843{
844 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
845 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
846
847 if (error) {
848 gfs2_meta_sync(ip->i_gl);
849 return;
850 }
851 if (pass != 1)
852 return;
853
854 gfs2_meta_sync(ip->i_gl);
855
856 fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
857 jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
858}
859
860static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
861{
862 struct gfs2_meta_header *mh;
863 unsigned int offset;
864 struct list_head *head = &sdp->sd_log_revokes;
865 struct gfs2_bufdata *bd;
866 struct page *page;
867 unsigned int length;
868
869 gfs2_write_revokes(sdp);
870 if (!sdp->sd_log_num_revoke)
871 return;
872
873 length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
874 page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
875 offset = sizeof(struct gfs2_log_descriptor);
876
877 list_for_each_entry(bd, head, bd_list) {
878 sdp->sd_log_num_revoke--;
879
880 if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
881
882 gfs2_log_write_page(sdp, page);
883 page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
884 mh = page_address(page);
885 clear_page(mh);
886 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
887 mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
888 mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
889 offset = sizeof(struct gfs2_meta_header);
890 }
891
892 *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
893 offset += sizeof(u64);
894 }
895 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
896
897 gfs2_log_write_page(sdp, page);
898}
899
900static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
901{
902 struct list_head *head = &sdp->sd_log_revokes;
903 struct gfs2_bufdata *bd;
904 struct gfs2_glock *gl;
905
906 while (!list_empty(head)) {
907 bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
908 list_del_init(&bd->bd_list);
909 gl = bd->bd_gl;
910 gfs2_glock_remove_revoke(gl);
911 kmem_cache_free(gfs2_bufdata_cachep, bd);
912 }
913}
914
915static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
916 struct gfs2_log_header_host *head, int pass)
917{
918 if (pass != 0)
919 return;
920
921 jd->jd_found_revokes = 0;
922 jd->jd_replay_tail = head->lh_tail;
923}
924
925static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
926 struct gfs2_log_descriptor *ld, __be64 *ptr,
927 int pass)
928{
929 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
930 unsigned int blks = be32_to_cpu(ld->ld_length);
931 unsigned int revokes = be32_to_cpu(ld->ld_data1);
932 struct buffer_head *bh;
933 unsigned int offset;
934 u64 blkno;
935 int first = 1;
936 int error;
937
938 if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
939 return 0;
940
941 offset = sizeof(struct gfs2_log_descriptor);
942
943 for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
944 error = gfs2_replay_read_block(jd, start, &bh);
945 if (error)
946 return error;
947
948 if (!first)
949 gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
950
951 while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
952 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
953
954 error = gfs2_revoke_add(jd, blkno, start);
955 if (error < 0) {
956 brelse(bh);
957 return error;
958 }
959 else if (error)
960 jd->jd_found_revokes++;
961
962 if (!--revokes)
963 break;
964 offset += sizeof(u64);
965 }
966
967 brelse(bh);
968 offset = sizeof(struct gfs2_meta_header);
969 first = 0;
970 }
971
972 return 0;
973}
974
975static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
976{
977 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
978
979 if (error) {
980 gfs2_revoke_clean(jd);
981 return;
982 }
983 if (pass != 1)
984 return;
985
986 fs_info(sdp, "jid=%u: Found %u revoke tags\n",
987 jd->jd_jid, jd->jd_found_revokes);
988
989 gfs2_revoke_clean(jd);
990}
991
992/**
993 * databuf_lo_before_commit - Scan the data buffers, writing as we go
994 *
995 */
996
997static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
998{
999 unsigned int limit = databuf_limit(sdp);
1000 unsigned int nbuf;
1001 if (tr == NULL)
1002 return;
1003 nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
1004 gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
1005}
1006
1007static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
1008 struct gfs2_log_descriptor *ld,
1009 __be64 *ptr, int pass)
1010{
1011 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1012 struct gfs2_glock *gl = ip->i_gl;
1013 unsigned int blks = be32_to_cpu(ld->ld_data1);
1014 struct buffer_head *bh_log, *bh_ip;
1015 u64 blkno;
1016 u64 esc;
1017 int error = 0;
1018
1019 if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
1020 return 0;
1021
1022 gfs2_replay_incr_blk(jd, &start);
1023 for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
1024 blkno = be64_to_cpu(*ptr++);
1025 esc = be64_to_cpu(*ptr++);
1026
1027 jd->jd_found_blocks++;
1028
1029 if (gfs2_revoke_check(jd, blkno, start))
1030 continue;
1031
1032 error = gfs2_replay_read_block(jd, start, &bh_log);
1033 if (error)
1034 return error;
1035
1036 bh_ip = gfs2_meta_new(gl, blkno);
1037 memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
1038
1039 /* Unescape */
1040 if (esc) {
1041 __be32 *eptr = (__be32 *)bh_ip->b_data;
1042 *eptr = cpu_to_be32(GFS2_MAGIC);
1043 }
1044 mark_buffer_dirty(bh_ip);
1045
1046 brelse(bh_log);
1047 brelse(bh_ip);
1048
1049 jd->jd_replayed_blocks++;
1050 }
1051
1052 return error;
1053}
1054
1055/* FIXME: sort out accounting for log blocks etc. */
1056
1057static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
1058{
1059 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1060 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
1061
1062 if (error) {
1063 gfs2_meta_sync(ip->i_gl);
1064 return;
1065 }
1066 if (pass != 1)
1067 return;
1068
1069 /* data sync? */
1070 gfs2_meta_sync(ip->i_gl);
1071
1072 fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
1073 jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
1074}
1075
1076static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1077{
1078 struct list_head *head;
1079 struct gfs2_bufdata *bd;
1080
1081 if (tr == NULL)
1082 return;
1083
1084 head = &tr->tr_databuf;
1085 while (!list_empty(head)) {
1086 bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1087 list_del_init(&bd->bd_list);
1088 gfs2_unpin(sdp, bd->bd_bh, tr);
1089 }
1090}
1091
1092
1093static const struct gfs2_log_operations gfs2_buf_lops = {
1094 .lo_before_commit = buf_lo_before_commit,
1095 .lo_after_commit = buf_lo_after_commit,
1096 .lo_before_scan = buf_lo_before_scan,
1097 .lo_scan_elements = buf_lo_scan_elements,
1098 .lo_after_scan = buf_lo_after_scan,
1099 .lo_name = "buf",
1100};
1101
1102static const struct gfs2_log_operations gfs2_revoke_lops = {
1103 .lo_before_commit = revoke_lo_before_commit,
1104 .lo_after_commit = revoke_lo_after_commit,
1105 .lo_before_scan = revoke_lo_before_scan,
1106 .lo_scan_elements = revoke_lo_scan_elements,
1107 .lo_after_scan = revoke_lo_after_scan,
1108 .lo_name = "revoke",
1109};
1110
1111static const struct gfs2_log_operations gfs2_databuf_lops = {
1112 .lo_before_commit = databuf_lo_before_commit,
1113 .lo_after_commit = databuf_lo_after_commit,
1114 .lo_scan_elements = databuf_lo_scan_elements,
1115 .lo_after_scan = databuf_lo_after_scan,
1116 .lo_name = "databuf",
1117};
1118
1119const struct gfs2_log_operations *gfs2_log_ops[] = {
1120 &gfs2_databuf_lops,
1121 &gfs2_buf_lops,
1122 &gfs2_revoke_lops,
1123 NULL,
1124};
1125