Loading...
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/completion.h>
16#include <linux/buffer_head.h>
17#include <linux/kallsyms.h>
18#include <linux/gfs2_ondisk.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "glock.h"
23#include "inode.h"
24#include "log.h"
25#include "lops.h"
26#include "meta_io.h"
27#include "trans.h"
28#include "util.h"
29#include "trace_gfs2.h"
30
/**
 * gfs2_trans_begin - Begin a new transaction
 * @sdp: The GFS2 superblock
 * @blocks: The number of metadata/data blocks this transaction may touch
 * @revokes: The number of revokes this transaction may add
 *
 * Allocates the transaction, reserves journal space, and acquires the
 * transaction glock in shared mode.  On success the transaction is
 * attached to the task via current->journal_info and must be ended
 * with gfs2_trans_end().
 *
 * Returns: 0 on success, -EROFS if the journal is not live, -ENOMEM on
 *          allocation failure, or an error from gfs2_glock_nq() /
 *          gfs2_log_reserve().
 */
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
		     unsigned int revokes)
{
	struct gfs2_trans *tr;
	int error;

	/* Nested transactions are not allowed. */
	BUG_ON(current->journal_info);
	/* A transaction touching neither blocks nor revokes is meaningless. */
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
	if (!tr)
		return -ENOMEM;

	/* Record the caller for diagnostics (printed by gfs2_print_trans). */
	tr->tr_ip = (unsigned long)__builtin_return_address(0);
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	/* One log block is always reserved; the extra 6 presumably cover log
	 * headers/descriptors — TODO confirm against calc_reserved(). */
	tr->tr_reserved = 1;
	if (blocks)
		tr->tr_reserved += 6 + blocks;
	if (revokes)
		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
						   sizeof(u64));
	INIT_LIST_HEAD(&tr->tr_databuf);
	INIT_LIST_HEAD(&tr->tr_buf);

	/* Block freezing of the filesystem until the transaction ends. */
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);

	error = gfs2_glock_nq(&tr->tr_t_gh);
	if (error)
		goto fail_holder_uninit;

	error = gfs2_log_reserve(sdp, tr->tr_reserved);
	if (error)
		goto fail_gunlock;

	current->journal_info = tr;

	return 0;

fail_gunlock:
	gfs2_glock_dq(&tr->tr_t_gh);

fail_holder_uninit:
	sb_end_intwrite(sdp->sd_vfs);
	gfs2_holder_uninit(&tr->tr_t_gh);
	kfree(tr);

	return error;
}
84
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns @blks to the pool of free log blocks and drops a shared hold
 * on sd_log_flush_lock (presumably acquired when the blocks were
 * reserved — see gfs2_log_reserve()).
 */

static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* The free-block count must never exceed the journal size. */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}
101
/*
 * Dump a transaction's accounting state to the kernel log; called from
 * gfs2_trans_end() when a reservation sanity check fails.
 */
static void gfs2_print_trans(const struct gfs2_trans *tr)
{
	/* %pSR resolves the caller address recorded at gfs2_trans_begin(). */
	pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
	pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched);
	/* new/removed counts for metadata bufs, data bufs, and revokes */
	pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke, tr->tr_num_revoke_rm);
}
112
/**
 * gfs2_trans_end - End (commit) the current transaction
 * @sdp: The GFS2 superblock
 *
 * Detaches the transaction from current->journal_info, commits any
 * journaled buffers, releases the transaction glock and the log flush
 * lock, and on synchronous mounts flushes the log immediately.
 */
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;
	s64 nbuf;
	BUG_ON(!tr);
	current->journal_info = NULL;

	if (!tr->tr_touched) {
		/* Nothing was journaled: give the log reservation back. */
		gfs2_log_release(sdp, tr->tr_reserved);
		if (tr->tr_t_gh.gh_gl) {
			gfs2_glock_dq(&tr->tr_t_gh);
			gfs2_holder_uninit(&tr->tr_t_gh);
			/* NOTE(review): tr is freed only when the holder was
			 * initialized; presumably gh_gl is always set for
			 * transactions begun via gfs2_trans_begin() — confirm
			 * no path reaches here with gh_gl unset (leak). */
			kfree(tr);
		}
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}

	/* Net number of buffers added by this transaction. */
	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
	nbuf -= tr->tr_num_buf_rm;
	nbuf -= tr->tr_num_databuf_rm;

	/* Dump the transaction if it exceeded its declared reservation. */
	if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
				       (tr->tr_num_revoke <= tr->tr_revokes)))
		gfs2_print_trans(tr);

	gfs2_log_commit(sdp, tr);
	if (tr->tr_t_gh.gh_gl) {
		gfs2_glock_dq(&tr->tr_t_gh);
		gfs2_holder_uninit(&tr->tr_t_gh);
		/* tr_attached presumably means the log now owns tr and will
		 * free it — confirm against gfs2_log_commit(). */
		if (!tr->tr_attached)
			kfree(tr);
	}
	up_read(&sdp->sd_log_flush_lock);

	/* Synchronous mount: flush the log after every transaction. */
	if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL);
	sb_end_intwrite(sdp->sd_vfs);
}
152
153static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
154 struct buffer_head *bh,
155 const struct gfs2_log_operations *lops)
156{
157 struct gfs2_bufdata *bd;
158
159 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
160 bd->bd_bh = bh;
161 bd->bd_gl = gl;
162 bd->bd_ops = lops;
163 INIT_LIST_HEAD(&bd->bd_list);
164 bh->b_private = bd;
165 return bd;
166}
167
168/**
169 * gfs2_trans_add_data - Add a databuf to the transaction.
170 * @gl: The inode glock associated with the buffer
171 * @bh: The buffer to add
172 *
173 * This is used in two distinct cases:
174 * i) In ordered write mode
175 * We put the data buffer on a list so that we can ensure that its
176 * synced to disk at the right time
177 * ii) In journaled data mode
178 * We need to journal the data block in the same way as metadata in
179 * the functions above. The difference is that here we have a tag
180 * which is two __be64's being the block number (as per meta data)
181 * and a flag which says whether the data block needs escaping or
182 * not. This means we need a new log entry for each 251 or so data
183 * blocks, which isn't an enormous overhead but twice as much as
184 * for normal metadata blocks.
185 */
186void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
187{
188 struct gfs2_trans *tr = current->journal_info;
189 struct gfs2_sbd *sdp = gl->gl_sbd;
190 struct address_space *mapping = bh->b_page->mapping;
191 struct gfs2_inode *ip = GFS2_I(mapping->host);
192 struct gfs2_bufdata *bd;
193
194 if (!gfs2_is_jdata(ip)) {
195 gfs2_ordered_add_inode(ip);
196 return;
197 }
198
199 lock_buffer(bh);
200 gfs2_log_lock(sdp);
201 bd = bh->b_private;
202 if (bd == NULL) {
203 gfs2_log_unlock(sdp);
204 unlock_buffer(bh);
205 if (bh->b_private == NULL)
206 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
207 lock_buffer(bh);
208 gfs2_log_lock(sdp);
209 }
210 gfs2_assert(sdp, bd->bd_gl == gl);
211 tr->tr_touched = 1;
212 if (list_empty(&bd->bd_list)) {
213 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
214 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
215 gfs2_pin(sdp, bd->bd_bh);
216 tr->tr_num_databuf_new++;
217 list_add_tail(&bd->bd_list, &tr->tr_databuf);
218 }
219 gfs2_log_unlock(sdp);
220 unlock_buffer(bh);
221}
222
/*
 * meta_lo_add - queue a metadata buffer on the current transaction
 * @sdp: The GFS2 superblock
 * @bd: The bufdata describing the buffer
 *
 * Called from gfs2_trans_add_meta() with the buffer lock and the log
 * lock held.  Pins the buffer, stamps the on-disk header with this
 * node's journal id, and adds the buffer to the transaction's metadata
 * list.  BUGs if the buffer does not carry the GFS2 metadata magic.
 */
static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	/* Already queued on a log list: nothing more to do. */
	if (!list_empty(&bd->bd_list))
		return;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
		/* Journaling a block with no metadata header is fatal. */
		pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
		       (unsigned long long)bd->bd_bh->b_blocknr);
		BUG();
	}
	/* Keep the buffer in memory until its log copy is on disk. */
	gfs2_pin(sdp, bd->bd_bh);
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	list_add(&bd->bd_list, &tr->tr_buf);
	tr->tr_num_buf_new++;
}
246
247void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
248{
249
250 struct gfs2_sbd *sdp = gl->gl_sbd;
251 struct gfs2_bufdata *bd;
252
253 lock_buffer(bh);
254 gfs2_log_lock(sdp);
255 bd = bh->b_private;
256 if (bd == NULL) {
257 gfs2_log_unlock(sdp);
258 unlock_buffer(bh);
259 lock_page(bh->b_page);
260 if (bh->b_private == NULL)
261 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
262 unlock_page(bh->b_page);
263 lock_buffer(bh);
264 gfs2_log_lock(sdp);
265 }
266 gfs2_assert(sdp, bd->bd_gl == gl);
267 meta_lo_add(sdp, bd);
268 gfs2_log_unlock(sdp);
269 unlock_buffer(bh);
270}
271
272void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
273{
274 struct gfs2_trans *tr = current->journal_info;
275
276 BUG_ON(!list_empty(&bd->bd_list));
277 gfs2_add_revoke(sdp, bd);
278 tr->tr_touched = 1;
279 tr->tr_num_revoke++;
280}
281
282void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
283{
284 struct gfs2_bufdata *bd, *tmp;
285 struct gfs2_trans *tr = current->journal_info;
286 unsigned int n = len;
287
288 gfs2_log_lock(sdp);
289 list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_list) {
290 if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
291 list_del_init(&bd->bd_list);
292 gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
293 sdp->sd_log_num_revoke--;
294 kmem_cache_free(gfs2_bufdata_cachep, bd);
295 tr->tr_num_revoke_rm++;
296 if (--n == 0)
297 break;
298 }
299 }
300 gfs2_log_unlock(sdp);
301}
302
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
14#include <linux/kallsyms.h>
15#include <linux/gfs2_ondisk.h>
16
17#include "gfs2.h"
18#include "incore.h"
19#include "glock.h"
20#include "inode.h"
21#include "log.h"
22#include "lops.h"
23#include "meta_io.h"
24#include "trans.h"
25#include "util.h"
26#include "trace_gfs2.h"
27
/*
 * Dump a transaction's accounting state; called when a nested
 * transaction is detected in __gfs2_trans_begin() or when a
 * reservation sanity check trips in gfs2_trans_end().
 */
static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
{
	/* %pSR resolves the caller address recorded at trans begin. */
	fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
		test_bit(TR_TOUCHED, &tr->tr_flags));
	/* new/removed counts for metadata bufs and data bufs, plus revokes */
	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke);
}
39
/**
 * __gfs2_trans_begin - Begin a transaction using a caller-provided struct
 * @tr: The transaction to initialize (typically zero-initialized;
 *      gfs2_trans_begin() passes a zeroed slab object)
 * @sdp: The GFS2 superblock
 * @blocks: The number of metadata/data blocks the transaction may touch
 * @revokes: The number of revokes the transaction may add
 * @ip: Caller's instruction pointer, recorded for diagnostics
 *
 * Reserves journal space (blocks and revokes) and attaches the
 * transaction to the task via current->journal_info.  Must be paired
 * with gfs2_trans_end().
 *
 * Returns: 0 on success, -EROFS if the journal is not live, or -EINVAL
 *          if the reservation could never fit in the journal.
 */
int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
		       unsigned int blocks, unsigned int revokes,
		       unsigned long ip)
{
	unsigned int extra_revokes;

	/* Nested transactions are not allowed; dump the active one. */
	if (current->journal_info) {
		gfs2_print_trans(sdp, current->journal_info);
		BUG();
	}
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr->tr_ip = ip;
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	if (blocks) {
		/*
		 * The reserved blocks are either used for data or metadata.
		 * We can have mixed data and metadata, each with its own log
		 * descriptor block; see calc_reserved().
		 */
		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
	}
	INIT_LIST_HEAD(&tr->tr_databuf);
	INIT_LIST_HEAD(&tr->tr_buf);
	INIT_LIST_HEAD(&tr->tr_list);
	INIT_LIST_HEAD(&tr->tr_ail1_list);
	INIT_LIST_HEAD(&tr->tr_ail2_list);

	/* A reservation larger than the whole journal can never succeed. */
	if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	/* Block freezing of the filesystem until the transaction ends. */
	sb_start_intwrite(sdp->sd_vfs);

	/*
	 * Try the reservations under sd_log_flush_lock to prevent log flushes
	 * from creating inconsistencies between the number of allocated and
	 * reserved revokes. If that fails, do a full-block allocation outside
	 * of the lock to avoid stalling log flushes. Then, allot the
	 * appropriate number of blocks to revokes, use as many revokes locally
	 * as needed, and "release" the surplus into the revokes pool.
	 */

	down_read(&sdp->sd_log_flush_lock);
	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
		goto reserved;
	up_read(&sdp->sd_log_flush_lock);
	gfs2_log_reserve(sdp, tr, &extra_revokes);
	down_read(&sdp->sd_log_flush_lock);

reserved:
	gfs2_log_release_revokes(sdp, extra_revokes);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		/* The journal died while we were reserving: undo everything. */
		gfs2_log_release_revokes(sdp, tr->tr_revokes);
		up_read(&sdp->sd_log_flush_lock);
		gfs2_log_release(sdp, tr->tr_reserved);
		sb_end_intwrite(sdp->sd_vfs);
		return -EROFS;
	}

	current->journal_info = tr;

	return 0;
}
108
109int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
110 unsigned int revokes)
111{
112 struct gfs2_trans *tr;
113 int error;
114
115 tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
116 if (!tr)
117 return -ENOMEM;
118 error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
119 if (error)
120 kmem_cache_free(gfs2_trans_cachep, tr);
121 return error;
122}
123
/**
 * gfs2_trans_end - End (commit) the current transaction
 * @sdp: The GFS2 superblock
 *
 * Detaches the transaction from current->journal_info, returns unused
 * revokes, commits any journaled buffers, drops the log flush lock,
 * and on synchronous mounts flushes the log immediately.
 */
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;
	s64 nbuf;

	current->journal_info = NULL;

	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
		/* Nothing was journaled: return revokes and blocks. */
		gfs2_log_release_revokes(sdp, tr->tr_revokes);
		up_read(&sdp->sd_log_flush_lock);
		gfs2_log_release(sdp, tr->tr_reserved);
		/* On-stack transactions are owned by the caller. */
		if (!test_bit(TR_ONSTACK, &tr->tr_flags))
			gfs2_trans_free(sdp, tr);
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}

	/* Give back the revokes we reserved but did not use. */
	gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);

	/* Net number of buffers added by this transaction. */
	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
	nbuf -= tr->tr_num_buf_rm;
	nbuf -= tr->tr_num_databuf_rm;

	/* Dump the transaction if it exceeded its declared reservation. */
	if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
	    gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
		gfs2_print_trans(sdp, tr);

	gfs2_log_commit(sdp, tr);
	/* TR_ATTACHED presumably means the log now owns tr and frees it
	 * later — confirm against gfs2_log_commit(). */
	if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
	    !test_bit(TR_ATTACHED, &tr->tr_flags))
		gfs2_trans_free(sdp, tr);
	up_read(&sdp->sd_log_flush_lock);

	/* Synchronous mount: flush the log after every transaction. */
	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_TRANS_END);
	sb_end_intwrite(sdp->sd_vfs);
}
162
163static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
164 struct buffer_head *bh)
165{
166 struct gfs2_bufdata *bd;
167
168 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
169 bd->bd_bh = bh;
170 bd->bd_gl = gl;
171 INIT_LIST_HEAD(&bd->bd_list);
172 INIT_LIST_HEAD(&bd->bd_ail_st_list);
173 INIT_LIST_HEAD(&bd->bd_ail_gl_list);
174 bh->b_private = bd;
175 return bd;
176}
177
/**
 * gfs2_trans_add_data - Add a databuf to the transaction.
 * @gl: The inode glock associated with the buffer
 * @bh: The buffer to add
 *
 * This is used in journaled data mode.
 * We need to journal the data block in the same way as metadata in
 * the functions above. The difference is that here we have a tag
 * which is two __be64's being the block number (as per meta data)
 * and a flag which says whether the data block needs escaping or
 * not. This means we need a new log entry for each 251 or so data
 * blocks, which isn't an enormous overhead but twice as much as
 * for normal metadata blocks.
 */
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	/* Fast path: a pinned buffer only needs the touched flag set. */
	if (buffer_pinned(bh)) {
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * Drop both locks to allocate, then re-check b_private in
		 * case another task attached a bufdata in the window.
		 */
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh);
		else
			bd = bh->b_private;
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	if (list_empty(&bd->bd_list)) {
		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
		/* Pin the buffer in memory until its log copy is on disk. */
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		list_add_tail(&bd->bd_list, &tr->tr_databuf);
	}
	gfs2_log_unlock(sdp);
out:
	unlock_buffer(bh);
}
228
/**
 * gfs2_trans_add_meta - Add a metadata buffer to the transaction
 * @gl: The glock the buffer belongs to
 * @bh: The buffer to add
 *
 * Attaches (allocating if needed) a bufdata to @bh, pins the buffer,
 * stamps the on-disk header with this node's journal id, and queues
 * the buffer on the current transaction's metadata list.
 */
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{

	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_bufdata *bd;
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr = current->journal_info;
	bool withdraw = false;

	lock_buffer(bh);
	/* Fast path: a pinned buffer only needs the touched flag set. */
	if (buffer_pinned(bh)) {
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * Drop the locks to allocate under the page lock, then
		 * re-check b_private in case another task attached a
		 * bufdata in the window.
		 */
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		lock_page(bh->b_page);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh);
		else
			bd = bh->b_private;
		unlock_page(bh->b_page);
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	/* Already queued on a log list: nothing more to do. */
	if (!list_empty(&bd->bd_list))
		goto out_unlock;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
		/* Journaling a block with no metadata header is fatal. */
		fs_err(sdp, "Attempting to add uninitialised block to "
		       "journal (inplace block=%lld)\n",
		       (unsigned long long)bd->bd_bh->b_blocknr);
		BUG();
	}
	/* Withdrawn filesystem: log it, but do not journal the buffer. */
	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
			(unsigned long long)bd->bd_bh->b_blocknr);
		goto out_unlock;
	}
	/* Adding buffers to a frozen fs is a bug; withdraw after unlocking
	 * (presumably to avoid asserting under the log lock — confirm). */
	if (unlikely(sb->s_writers.frozen == SB_FREEZE_COMPLETE)) {
		fs_info(sdp, "GFS2:adding buf while frozen\n");
		withdraw = true;
		goto out_unlock;
	}
	/* Keep the buffer in memory until its log copy is on disk. */
	gfs2_pin(sdp, bd->bd_bh);
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	list_add(&bd->bd_list, &tr->tr_buf);
	tr->tr_num_buf_new++;
out_unlock:
	gfs2_log_unlock(sdp);
	if (withdraw)
		gfs2_assert_withdraw(sdp, 0);
out:
	unlock_buffer(bh);
}
293
294void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
295{
296 struct gfs2_trans *tr = current->journal_info;
297
298 BUG_ON(!list_empty(&bd->bd_list));
299 gfs2_add_revoke(sdp, bd);
300 set_bit(TR_TOUCHED, &tr->tr_flags);
301 tr->tr_num_revoke++;
302}
303
304void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
305{
306 struct gfs2_bufdata *bd, *tmp;
307 unsigned int n = len;
308
309 gfs2_log_lock(sdp);
310 list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
311 if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
312 list_del_init(&bd->bd_list);
313 gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
314 sdp->sd_log_num_revoke--;
315 if (bd->bd_gl)
316 gfs2_glock_remove_revoke(bd->bd_gl);
317 kmem_cache_free(gfs2_bufdata_cachep, bd);
318 gfs2_log_release_revokes(sdp, 1);
319 if (--n == 0)
320 break;
321 }
322 }
323 gfs2_log_unlock(sdp);
324}
325
326void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
327{
328 if (tr == NULL)
329 return;
330
331 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
332 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
333 gfs2_assert_warn(sdp, list_empty(&tr->tr_databuf));
334 gfs2_assert_warn(sdp, list_empty(&tr->tr_buf));
335 kmem_cache_free(gfs2_trans_cachep, tr);
336}