/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"

#include <linux/dm-bufio.h>
#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>
#include <linux/sched/task.h>

#define DM_MSG_PREFIX "block manager"

/*----------------------------------------------------------------*/

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING

/*
 * This is a read/write semaphore with a couple of differences.
 *
 * i) There is a restriction on the number of concurrent read locks that
 * may be held at once. This is just an implementation detail.
 *
 * ii) Recursive locking attempts are detected and return -EINVAL. A stack
 * trace is also emitted for the previous lock acquisition.
 *
 * iii) Priority is given to write locks.
 */
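/*
 * Within this file the lock state is tracked in block_lock::count: -1
 * means write locked, 0 unlocked, and a positive value is the number of
 * read holders (bounded by MAX_HOLDERS below).
 */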
#define MAX_HOLDERS 4
#define MAX_STACK 10

struct stack_store {
	unsigned int nr_entries;
	unsigned long entries[MAX_STACK];
};

struct block_lock {
	spinlock_t lock;
	__s32 count;
	struct list_head waiters;
	struct task_struct *holders[MAX_HOLDERS];

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_store traces[MAX_HOLDERS];
#endif
};

struct waiter {
	struct list_head list;
	struct task_struct *task;
	int wants_write;
};

static unsigned __find_holder(struct block_lock *lock,
			      struct task_struct *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			break;

	BUG_ON(i == MAX_HOLDERS);
	return i;
}

/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_store *t;
#endif

	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	t = lock->traces + h;
	t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}

/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	put_task_struct(task);
}
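/*
 * Free holder slots are identified by a NULL entry, which is why
 * __add_holder() asks __find_holder() for a NULL slot.  Because the
 * number of concurrent holders is capped at MAX_HOLDERS and count is
 * adjusted in step with holders[], a free slot is always available and
 * the BUG_ON() in __find_holder() cannot fire.
 */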
97
98static int __check_holder(struct block_lock *lock)
99{
100 unsigned i;
101
102 for (i = 0; i < MAX_HOLDERS; i++) {
103 if (lock->holders[i] == current) {
104 DMERR("recursive lock detected in metadata");
105#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
106 DMERR("previously held here:");
107 stack_trace_print(lock->traces[i].entries,
108 lock->traces[i].nr_entries, 4);
109
110 DMERR("subsequent acquisition attempted here:");
111 dump_stack();
112#endif
113 return -EINVAL;
114 }
115 }
116
117 return 0;
118}
119
120static void __wait(struct waiter *w)
121{
122 for (;;) {
123 set_current_state(TASK_UNINTERRUPTIBLE);
124
125 if (!w->task)
126 break;
127
128 schedule();
129 }
130
131 set_current_state(TASK_RUNNING);
132}
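/*
 * The wait/wake handshake uses w->task as the condition: __wait() sets
 * TASK_UNINTERRUPTIBLE *before* re-checking w->task, and __wake_waiter()
 * below clears w->task only after the task pointer has been captured, so
 * a wakeup that races with the check cannot be lost.
 */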
133
134static void __wake_waiter(struct waiter *w)
135{
136 struct task_struct *task;
137
138 list_del(&w->list);
139 task = w->task;
140 smp_mb();
141 w->task = NULL;
142 wake_up_process(task);
143}
144
145/*
146 * We either wake a few readers or a single writer.
147 */
148static void __wake_many(struct block_lock *lock)
149{
150 struct waiter *w, *tmp;
151
152 BUG_ON(lock->count < 0);
153 list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
154 if (lock->count >= MAX_HOLDERS)
155 return;
156
157 if (w->wants_write) {
158 if (lock->count > 0)
159 return; /* still read locked */
160
161 lock->count = -1;
162 __add_holder(lock, w->task);
163 __wake_waiter(w);
164 return;
165 }
166
167 lock->count++;
168 __add_holder(lock, w->task);
169 __wake_waiter(w);
170 }
171}
172
173static void bl_init(struct block_lock *lock)
174{
175 int i;
176
177 spin_lock_init(&lock->lock);
178 lock->count = 0;
179 INIT_LIST_HEAD(&lock->waiters);
180 for (i = 0; i < MAX_HOLDERS; i++)
181 lock->holders[i] = NULL;
182}
183
184static int __available_for_read(struct block_lock *lock)
185{
186 return lock->count >= 0 &&
187 lock->count < MAX_HOLDERS &&
188 list_empty(&lock->waiters);
189}
190
191static int bl_down_read(struct block_lock *lock)
192{
193 int r;
194 struct waiter w;
195
196 spin_lock(&lock->lock);
197 r = __check_holder(lock);
198 if (r) {
199 spin_unlock(&lock->lock);
200 return r;
201 }
202
203 if (__available_for_read(lock)) {
204 lock->count++;
205 __add_holder(lock, current);
206 spin_unlock(&lock->lock);
207 return 0;
208 }
209
210 get_task_struct(current);
211
212 w.task = current;
213 w.wants_write = 0;
214 list_add_tail(&w.list, &lock->waiters);
215 spin_unlock(&lock->lock);
216
217 __wait(&w);
218 put_task_struct(current);
219 return 0;
220}
221
222static int bl_down_read_nonblock(struct block_lock *lock)
223{
224 int r;
225
226 spin_lock(&lock->lock);
227 r = __check_holder(lock);
228 if (r)
229 goto out;
230
231 if (__available_for_read(lock)) {
232 lock->count++;
233 __add_holder(lock, current);
234 r = 0;
235 } else
236 r = -EWOULDBLOCK;
237
238out:
239 spin_unlock(&lock->lock);
240 return r;
241}
242
243static void bl_up_read(struct block_lock *lock)
244{
245 spin_lock(&lock->lock);
246 BUG_ON(lock->count <= 0);
247 __del_holder(lock, current);
248 --lock->count;
249 if (!list_empty(&lock->waiters))
250 __wake_many(lock);
251 spin_unlock(&lock->lock);
252}
253
static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers are given priority: list_add() queues this waiter at the
	 * head of the list, ahead of any waiting readers.  We know there's
	 * only one mutator in the system, so we can ignore the ordering
	 * reversal.
	 */
	list_add(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}

static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}

static void report_recursive_bug(dm_block_t b, int r)
{
	if (r == -EINVAL)
		DMERR("recursive acquisition of block %llu requested.",
		      (unsigned long long) b);
}

#else /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */

#define bl_init(x) do { } while (0)
#define bl_down_read(x) 0
#define bl_down_read_nonblock(x) 0
#define bl_up_read(x) do { } while (0)
#define bl_down_write(x) 0
#define bl_up_write(x) do { } while (0)
#define report_recursive_bug(x, y) do { } while (0)

#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
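
/*
 * With CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING disabled the bl_* helpers
 * above compile away to no-ops that always succeed, so the public
 * interface below behaves identically, just without recursive-lock
 * detection and holder tracking.
 */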

/*----------------------------------------------------------------*/

/*
 * Block manager is currently implemented using dm-bufio. struct
 * dm_block_manager and struct dm_block map directly onto a couple of
 * structs in the bufio interface. I want to retain the freedom to move
 * away from bufio in the future. So these structs are just cast within
 * this .c file, rather than making it through to the public interface.
 */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	return (struct dm_buffer *) b;
}

dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);

void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);

struct buffer_aux {
	struct dm_block_validator *validator;
	int write_locked;

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
	struct block_lock lock;
#endif
};
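/*
 * buffer_aux is the per-buffer auxiliary data that dm-bufio allocates on
 * our behalf: sizeof(struct buffer_aux) is passed as the aux_size argument
 * to dm_bufio_client_create() below, and dm_bufio_get_aux_data() hands the
 * structure back for a given buffer.
 */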

static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	aux->validator = NULL;
	bl_init(&aux->lock);
}

static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
						  dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}
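/*
 * dm-bufio invokes the write callback just before a dirty buffer is
 * written back to the device, so prepare_for_write() is the validator's
 * chance to stamp checksums and any other derived fields into the block
 * data.
 */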
369
370/*----------------------------------------------------------------
371 * Public interface
372 *--------------------------------------------------------------*/
373struct dm_block_manager {
374 struct dm_bufio_client *bufio;
375 bool read_only:1;
376};
377
378struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
379 unsigned block_size,
380 unsigned max_held_per_thread)
381{
382 int r;
383 struct dm_block_manager *bm;
384
385 bm = kmalloc(sizeof(*bm), GFP_KERNEL);
386 if (!bm) {
387 r = -ENOMEM;
388 goto bad;
389 }
390
391 bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
392 sizeof(struct buffer_aux),
393 dm_block_manager_alloc_callback,
394 dm_block_manager_write_callback,
395 0);
396 if (IS_ERR(bm->bufio)) {
397 r = PTR_ERR(bm->bufio);
398 kfree(bm);
399 goto bad;
400 }
401
402 bm->read_only = false;
403
404 return bm;
405
406bad:
407 return ERR_PTR(r);
408}
409EXPORT_SYMBOL_GPL(dm_block_manager_create);
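
/*
 * Illustrative only: a metadata client typically creates one block manager
 * per metadata device, e.g.
 *
 *	bm = dm_block_manager_create(bdev, 4096, 1);
 *	if (IS_ERR(bm))
 *		return PTR_ERR(bm);
 *
 * where 4096 is an example block size in bytes and 1 an example limit on
 * the number of blocks a single thread will hold locked at once; both
 * values are placeholders, not requirements of this interface.
 */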

void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);

dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}

static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;
		if (!v)
			return 0;
		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r)) {
			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return r;
		}
		aux->validator = v;
	} else {
		if (unlikely(aux->validator != v)) {
			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
				    aux->validator->name, v ? v->name : "NULL",
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}
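
/*
 * Note that the validator supplied on the first lock of a buffer is
 * checked once and then cached in aux->validator above, so later locks of
 * the same cached buffer skip the (potentially expensive) check.  Locking
 * a buffer again with a different validator is reported as a mismatch and
 * fails with -EINVAL.
 */
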
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);

int dm_bm_write_lock(struct dm_block_manager *bm,
		     dm_block_t b, struct dm_block_validator *v,
		     struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	if (dm_bm_is_read_only(bm))
		return -EPERM;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 1;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_write(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);

int dm_bm_read_try_lock(struct dm_block_manager *bm,
			dm_block_t b, struct dm_block_validator *v,
			struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (unlikely(!p))
		return -EWOULDBLOCK;

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read_nonblock(&aux->lock);
	if (r < 0) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}
	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}

int dm_bm_write_lock_zero(struct dm_block_manager *bm,
			  dm_block_t b, struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	if (dm_bm_is_read_only(bm))
		return -EPERM;

	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	memset(p, 0, dm_bm_block_size(bm));

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	aux->write_locked = 1;
	aux->validator = v;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
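
/*
 * No validation is done in dm_bm_write_lock_zero(): the block contents
 * have just been zeroed, so there is nothing to check.  The supplied
 * validator is simply installed so that prepare_for_write() runs when the
 * buffer is eventually written back.
 */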

void dm_bm_unlock(struct dm_block *b)
{
	struct buffer_aux *aux;
	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
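
/*
 * A minimal, illustrative sketch (not part of the interface) of the usual
 * lock/unlock cycle a metadata client performs around a read.  The
 * validator argument and the inspection of the data are placeholders.
 */
static int __maybe_unused dm_bm_example_read(struct dm_block_manager *bm,
					     dm_block_t b,
					     struct dm_block_validator *v)
{
	struct dm_block *blk;
	int r;

	r = dm_bm_read_lock(bm, b, v, &blk);
	if (r)
		return r;

	/* ... examine dm_block_data(blk) while the read lock is held ... */

	dm_bm_unlock(blk);
	return 0;
}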

int dm_bm_flush(struct dm_block_manager *bm)
{
	if (dm_bm_is_read_only(bm))
		return -EPERM;

	return dm_bufio_write_dirty_buffers(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_flush);

void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
	dm_bufio_prefetch(bm->bufio, b, 1);
}

bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
	return (bm ? bm->read_only : true);
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);

void dm_bm_set_read_only(struct dm_block_manager *bm)
{
	if (bm)
		bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);

void dm_bm_set_read_write(struct dm_block_manager *bm)
{
	if (bm)
		bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);

u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
	return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
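
/*
 * Illustrative only: on-disk formats built on this library typically
 * checksum everything in a block apart from the checksum field itself,
 * XORing in a per-format seed so blocks belonging to different formats
 * cannot be confused, e.g.
 *
 *	csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
 *					  block_size - sizeof(__le32),
 *					  MY_FORMAT_CSUM_XOR));
 *
 * "disk", its layout and MY_FORMAT_CSUM_XOR are placeholders, not part of
 * this file.
 */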

/*----------------------------------------------------------------*/

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");

/*----------------------------------------------------------------*/