// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "transaction manager"

/*----------------------------------------------------------------*/

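/*
 * The non-blocking clone records blocks it couldn't read without blocking
 * in a small, fixed-size set so the real transaction manager can prefetch
 * them later.  Each block number hashes to one of PREFETCH_SIZE slots; if
 * the slot is already occupied the hint is simply dropped, which is
 * harmless.
 */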
#define PREFETCH_SIZE 128
#define PREFETCH_BITS 7
#define PREFETCH_SENTINEL ((dm_block_t) -1ULL)

struct prefetch_set {
	struct mutex lock;
	dm_block_t blocks[PREFETCH_SIZE];
};

static unsigned int prefetch_hash(dm_block_t b)
{
	return hash_64(b, PREFETCH_BITS);
}

static void prefetch_wipe(struct prefetch_set *p)
{
	unsigned int i;

	for (i = 0; i < PREFETCH_SIZE; i++)
		p->blocks[i] = PREFETCH_SENTINEL;
}

static void prefetch_init(struct prefetch_set *p)
{
	mutex_init(&p->lock);
	prefetch_wipe(p);
}

static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
	unsigned int h = prefetch_hash(b);

	mutex_lock(&p->lock);
	if (p->blocks[h] == PREFETCH_SENTINEL)
		p->blocks[h] = b;

	mutex_unlock(&p->lock);
}

static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
{
	unsigned int i;

	mutex_lock(&p->lock);

	for (i = 0; i < PREFETCH_SIZE; i++)
		if (p->blocks[i] != PREFETCH_SENTINEL) {
			dm_bm_prefetch(bm, p->blocks[i]);
			p->blocks[i] = PREFETCH_SENTINEL;
		}

	mutex_unlock(&p->lock);
}

/*----------------------------------------------------------------*/

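/*
 * Each shadow_info records a block that has already been shadowed (or
 * freshly allocated) within the current transaction, so it can be
 * written in place without being copied again.
 */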
struct shadow_info {
	struct hlist_node hlist;
	dm_block_t where;
};

/*
 * It would be nice if this scaled with the size of the transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)

struct dm_transaction_manager {
	int is_clone;
	struct dm_transaction_manager *real;

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;
	struct hlist_head buckets[DM_HASH_SIZE];

	struct prefetch_set prefetches;
};

/*----------------------------------------------------------------*/

static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int r = 0;
	unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
	struct shadow_info *si;

	spin_lock(&tm->lock);
	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
		if (si->where == b) {
			r = 1;
			break;
		}
	spin_unlock(&tm->lock);

	return r;
}

/*
 * This can silently fail if there's no memory. We're ok with this since
 * creating redundant shadows causes no harm.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	unsigned int bucket;
	struct shadow_info *si;

	si = kmalloc(sizeof(*si), GFP_NOIO);
	if (si) {
		si->where = b;
		bucket = dm_hash_block(b, DM_HASH_MASK);
		spin_lock(&tm->lock);
		hlist_add_head(&si->hlist, tm->buckets + bucket);
		spin_unlock(&tm->lock);
	}
}

static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
	struct shadow_info *si;
	struct hlist_node *tmp;
	struct hlist_head *bucket;
	int i;

	spin_lock(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++) {
		bucket = tm->buckets + i;
		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
			kfree(si);

		INIT_HLIST_HEAD(bucket);
	}

	spin_unlock(&tm->lock);
}

/*----------------------------------------------------------------*/

static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
{
	int i;
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return ERR_PTR(-ENOMEM);

	tm->is_clone = 0;
	tm->real = NULL;
	tm->bm = bm;
	tm->sm = sm;

	spin_lock_init(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++)
		INIT_HLIST_HEAD(tm->buckets + i);

	prefetch_init(&tm->prefetches);

	return tm;
}

struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
{
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (tm) {
		tm->is_clone = 1;
		tm->real = real;
	}

	return tm;
}
EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm)
		return;

	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);

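/*
 * Committing happens in two steps: dm_tm_pre_commit() commits the space
 * map and flushes outstanding writes, then dm_tm_commit() drops the
 * shadow table, unlocks the root block supplied by the caller and issues
 * a final flush.  Neither is supported on a non-blocking clone.
 */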
int dm_tm_pre_commit(struct dm_transaction_manager *tm)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_commit(tm->sm);
	if (r < 0)
		return r;

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);
	dm_bm_unlock(root);

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

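/*
 * Allocates a fresh block from the space map and returns it zeroed and
 * write locked.  The new block is recorded as a shadow so it won't be
 * copied again within this transaction.
 */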
int dm_tm_new_block(struct dm_transaction_manager *tm,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	dm_block_t new_block;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_new_block(tm->sm, &new_block);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
	if (r < 0) {
		dm_sm_dec_block(tm->sm, new_block);
		return r;
	}

	/*
	 * New blocks count as shadows in that they don't need to be
	 * shadowed again.
	 */
	insert_shadow(tm, new_block);

	return 0;
}

static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	dm_block_t new;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		return r;

	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		return r;

	/*
	 * It would be tempting to use dm_bm_unlock_move here, but some
	 * code, such as the space maps, keeps using the old data structures
	 * secure in the knowledge they won't be changed until the next
	 * transaction. Using unlock_move would force a synchronous read
	 * since the old block would no longer be in the cache.
	 */
	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r) {
		dm_bm_unlock(orig_block);
		return r;
	}

	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	dm_bm_unlock(orig_block);
	return r;
}

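/*
 * Makes a block writable within this transaction.  If it has already
 * been shadowed and isn't shared, it is simply write locked in place;
 * otherwise its contents are copied into a newly allocated block.
 * *inc_children is set when the original's reference count is greater
 * than one, so the caller knows its children's counts need adjusting.
 */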
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;
	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);

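/*
 * A non-blocking clone never waits for I/O: if the read try-lock fails
 * with -EWOULDBLOCK the block is remembered so dm_tm_issue_prefetches()
 * can pull it in before the caller retries.
 */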
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **blk)
{
	if (tm->is_clone) {
		int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);

		if (r == -EWOULDBLOCK)
			prefetch_add(&tm->real->prefetches, b);

		return r;
	}

	return dm_bm_read_lock(tm->bm, b, v, blk);
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);

void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_inc_range);

void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);

void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_dec_range);

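/*
 * Coalesces an array of little-endian block numbers into runs of
 * consecutive blocks and calls fn(tm, begin, end) once per run, with
 * 'end' exclusive.  Useful for batching reference count changes such as
 * dm_tm_inc_range()/dm_tm_dec_range().
 */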
void dm_tm_with_runs(struct dm_transaction_manager *tm,
		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
{
	uint64_t b, begin, end;
	bool in_run = false;
	unsigned int i;

	for (i = 0; i < count; i++, value_le++) {
		b = le64_to_cpu(*value_le);

		if (in_run) {
			if (b == end)
				end++;
			else {
				fn(tm, begin, end);
				begin = b;
				end = b + 1;
			}
		} else {
			in_run = true;
			begin = b;
			end = b + 1;
		}
	}

	if (in_run)
		fn(tm, begin, end);
}
EXPORT_SYMBOL_GPL(dm_tm_with_runs);

int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}

int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
			  int *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_count_is_more_than_one(tm->sm, b, result);
}

struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}

void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
{
	prefetch_issue(&tm->prefetches, tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);

/*----------------------------------------------------------------*/

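/*
 * Builds a metadata space map and a transaction manager as a pair, since
 * each needs the other.  When 'create' is set a fresh space map covering
 * all of bm's blocks is formatted; otherwise an existing one is opened
 * from the sm_root that describes it.
 */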
static int dm_tm_create_internal(struct dm_block_manager *bm,
				 dm_block_t sb_location,
				 struct dm_transaction_manager **tm,
				 struct dm_space_map **sm,
				 int create,
				 void *sm_root, size_t sm_len)
{
	int r;

	*sm = dm_sm_metadata_init();
	if (IS_ERR(*sm))
		return PTR_ERR(*sm);

	*tm = dm_tm_create(bm, *sm);
	if (IS_ERR(*tm)) {
		dm_sm_destroy(*sm);
		return PTR_ERR(*tm);
	}

	if (create) {
		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
					  sb_location);
		if (r) {
			DMERR("couldn't create metadata space map");
			goto bad;
		}

	} else {
		r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
		if (r) {
			DMERR("couldn't open metadata space map");
			goto bad;
		}
	}

	return 0;

bad:
	dm_tm_destroy(*tm);
	dm_sm_destroy(*sm);
	return r;
}

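/*
 * A rough sketch of how a client might drive a transaction (illustrative
 * only; SUPERBLOCK, the validators and the error handling are placeholders,
 * and the superblock is written via the usual dm_bm_* block-manager calls):
 *
 *	dm_tm_create_with_sm(bm, SUPERBLOCK, &tm, &sm);
 *	dm_tm_shadow_block(tm, old_root, &node_validator, &node, &inc);
 *	... modify node, then dm_tm_unlock(tm, node) ...
 *	dm_tm_pre_commit(tm);
 *	dm_bm_write_lock(dm_tm_get_bm(tm), SUPERBLOCK, &sb_validator, &sb);
 *	... record the new root in the superblock ...
 *	dm_tm_commit(tm, sb);
 */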
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       void *sm_root, size_t root_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);

/*----------------------------------------------------------------*/