/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/uio.h>

#define DM_MSG_PREFIX "log-writes"

/*
 * This target will sequentially log all writes to the target device onto the
 * log device. This is helpful for replaying writes to check for fs consistency
 * at all times. This target provides a mechanism to mark specific events to
 * check data at a later time. So for example you would:
 *
 * write data
 * fsync
 * dmsetup message /dev/whatever mark mymark
 * unmount /mnt/test
 *
 * Then replay the log up to mymark and check the contents of the replay to
 * verify it matches what was written.
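 *
 * For example, a replay up to the mark might use the replay-log tool that
 * ships with xfstests (device paths here are illustrative):
 *
 * replay-log --log /dev/sdc --replay /dev/sdb --end-mark mymark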
 *
 * We log writes only after they have been flushed; this makes the log describe
 * close to the order in which the data hits the actual disk, not its cache. So
 * for example the following sequence (W means write, C means complete)
 *
 * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
 *
 * would result in the log looking like this:
 *
 * c,a,b,flush,fuad,<other writes>,<next flush>
 *
 * This is meant to help expose problems where file systems do not properly wait
 * on data being written before invoking a FLUSH. FUA bypasses the cache, so once
 * it completes it is added to the log as it should be on disk.
 *
 * We treat DISCARDs as if they don't bypass cache so that they are logged in
 * order of completion along with the normal writes. If we didn't do it this
 * way, we would process all the discards first and then write all the data,
 * when in fact we want to do the data and the discard in the order that they
 * completed.
 */
#define LOG_FLUSH_FLAG		(1 << 0)
#define LOG_FUA_FLAG		(1 << 1)
#define LOG_DISCARD_FLAG	(1 << 2)
#define LOG_MARK_FLAG		(1 << 3)
#define LOG_METADATA_FLAG	(1 << 4)

#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
#define WRITE_LOG_SUPER_SECTOR 0

/*
 * The disk format for this is braindead simple.
 *
 * At byte 0 we have our super, followed by the following sequence for
 * nr_entries:
 *
 * [   1 sector    ][  entry->nr_sectors ]
 * [log_write_entry][    data written    ]
 *
 * The log_write_entry takes up a full sector so we can have arbitrary length
 * marks and it leaves us room for extra content in the future.
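 *
 * For example, with a 4096 byte sectorsize the super occupies bytes 0-4095,
 * the first entry's log_write_entry sector starts at byte 4096, and that
 * entry's data (if any) follows at byte 8192.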
 */

/*
 * Basic info about the log for userspace.
 */
struct log_write_super {
	__le64 magic;
	__le64 version;
	__le64 nr_entries;
	__le32 sectorsize;
};

/*
 * sector - the sector we wrote.
 * nr_sectors - the number of sectors we wrote.
 * flags - flags for this log entry.
 * data_len - the size of the data in this log entry, this is for private log
 *	      entry stuff, the MARK data provided by userspace for example.
 */
struct log_write_entry {
	__le64 sector;
	__le64 nr_sectors;
	__le64 flags;
	__le64 data_len;
};

struct log_writes_c {
	struct dm_dev *dev;
	struct dm_dev *logdev;
	u64 logged_entries;
	u32 sectorsize;
	u32 sectorshift;
	atomic_t io_blocks;
	atomic_t pending_blocks;
	sector_t next_sector;
	sector_t end_sector;
	bool logging_enabled;
	bool device_supports_discard;
	spinlock_t blocks_lock;
	struct list_head unflushed_blocks;
	struct list_head logging_blocks;
	wait_queue_head_t wait;
	struct task_struct *log_kthread;
	struct completion super_done;
};

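/*
 * One write queued for logging. For data writes, vecs[] holds private copies
 * of the bio's pages so the data is still around when the log kthread gets
 * to it.
 */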
struct pending_block {
	int vec_cnt;
	u64 flags;
	sector_t sector;
	sector_t nr_sectors;
	char *data;
	u32 datalen;
	struct list_head list;
	struct bio_vec vecs[];
};

struct per_bio_data {
	struct pending_block *block;
};

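/*
 * The log device's logical sector size may be larger than 512 bytes; these
 * helpers convert between 512-byte bio sectors and log-device-sized sectors.
 */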
static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
					  sector_t sectors)
{
	return sectors >> (lc->sectorshift - SECTOR_SHIFT);
}

static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
					  sector_t sectors)
{
	return sectors << (lc->sectorshift - SECTOR_SHIFT);
}

static void put_pending_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->pending_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}

static void put_io_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->io_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}

static void log_end_io(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;

	if (bio->bi_status) {
		unsigned long flags;

		DMERR("Error writing log block, error=%d", bio->bi_status);
		spin_lock_irqsave(&lc->blocks_lock, flags);
		lc->logging_enabled = false;
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	bio_free_pages(bio);
	put_io_block(lc);
	bio_put(bio);
}

static void log_end_super(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;

	complete(&lc->super_done);
	log_end_io(bio);
}

/*
 * Meant to be called if there is an error, it will free all the pages
 * associated with the block.
 */
static void free_pending_block(struct log_writes_c *lc,
			       struct pending_block *block)
{
	int i;

	for (i = 0; i < block->vec_cnt; i++) {
		if (block->vecs[i].bv_page)
			__free_page(block->vecs[i].bv_page);
	}
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
}

static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
			  log_end_super : log_end_io;
	bio->bi_private = lc;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}

static int write_inline_data(struct log_writes_c *lc, void *entry,
			     size_t entrylen, void *data, size_t datalen,
			     sector_t sector)
{
	int bio_pages, pg_datalen, pg_sectorlen, i;
	struct page *page;
	struct bio *bio;
	size_t ret;
	void *ptr;

	while (datalen) {
		bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));

		atomic_inc(&lc->io_blocks);

		bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
				GFP_KERNEL);
		bio->bi_iter.bi_size = 0;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = log_end_io;
		bio->bi_private = lc;

		for (i = 0; i < bio_pages; i++) {
			pg_datalen = min_t(int, datalen, PAGE_SIZE);
			pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				DMERR("Couldn't alloc inline data page");
				goto error_bio;
			}

			ptr = kmap_atomic(page);
			memcpy(ptr, data, pg_datalen);
			if (pg_sectorlen > pg_datalen)
				memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
			kunmap_atomic(ptr);

			ret = bio_add_page(bio, page, pg_sectorlen, 0);
			if (ret != pg_sectorlen) {
				DMERR("Couldn't add page of inline data");
				__free_page(page);
				goto error_bio;
			}

			datalen -= pg_datalen;
			data += pg_datalen;
		}
		submit_bio(bio);

		sector += bio_pages * PAGE_SECTORS;
	}
	return 0;
error_bio:
	bio_free_pages(bio);
	bio_put(bio);
	put_io_block(lc);
	return -1;
}

static int log_one_block(struct log_writes_c *lc,
			 struct pending_block *block, sector_t sector)
{
	struct bio *bio;
	struct log_write_entry entry;
	size_t metadatalen, ret;
	int i;

	entry.sector = cpu_to_le64(block->sector);
	entry.nr_sectors = cpu_to_le64(block->nr_sectors);
	entry.flags = cpu_to_le64(block->flags);
	entry.data_len = cpu_to_le64(block->datalen);

	metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
	if (write_metadata(lc, &entry, sizeof(entry), block->data,
			   metadatalen, sector)) {
		free_pending_block(lc, block);
		return -1;
	}

	sector += dev_to_bio_sectors(lc, 1);

	if (block->datalen && metadatalen == 0) {
		if (write_inline_data(lc, &entry, sizeof(entry), block->data,
				      block->datalen, sector)) {
			free_pending_block(lc, block);
			return -1;
		}
		/* we don't support both inline data & bio data */
		goto out;
	}

	if (!block->vec_cnt)
		goto out;

	atomic_inc(&lc->io_blocks);
	bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
			REQ_OP_WRITE, GFP_KERNEL);
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;

	for (i = 0; i < block->vec_cnt; i++) {
		/*
		 * The page offset is always 0 because we allocate a new page
		 * for every bvec in the original bio for simplicity's sake.
		 */
		ret = bio_add_page(bio, block->vecs[i].bv_page,
				   block->vecs[i].bv_len, 0);
		if (ret != block->vecs[i].bv_len) {
			atomic_inc(&lc->io_blocks);
			submit_bio(bio);
			bio = bio_alloc(lc->logdev->bdev,
					bio_max_segs(block->vec_cnt - i),
					REQ_OP_WRITE, GFP_KERNEL);
			bio->bi_iter.bi_size = 0;
			bio->bi_iter.bi_sector = sector;
			bio->bi_end_io = log_end_io;
			bio->bi_private = lc;

			ret = bio_add_page(bio, block->vecs[i].bv_page,
					   block->vecs[i].bv_len, 0);
			if (ret != block->vecs[i].bv_len) {
				DMERR("Couldn't add page on new bio?");
				bio_put(bio);
				goto error;
			}
		}
		sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
	}
	submit_bio(bio);
out:
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
	return 0;
error:
	free_pending_block(lc, block);
	put_io_block(lc);
	return -1;
}

static int log_super(struct log_writes_c *lc)
{
	struct log_write_super super;

	super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
	super.version = cpu_to_le64(WRITE_LOG_VERSION);
	super.nr_entries = cpu_to_le64(lc->logged_entries);
	super.sectorsize = cpu_to_le32(lc->sectorsize);

	if (write_metadata(lc, &super, sizeof(super), NULL, 0,
			   WRITE_LOG_SUPER_SECTOR)) {
		DMERR("Couldn't write super");
		return -1;
	}

	/*
	 * Super sector should be written in-order, otherwise the
	 * nr_entries could be rewritten incorrectly by an old bio.
	 */
	wait_for_completion_io(&lc->super_done);

	return 0;
}

static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
	return bdev_nr_sectors(lc->logdev->bdev);
}

static int log_writes_kthread(void *arg)
{
	struct log_writes_c *lc = (struct log_writes_c *)arg;
	sector_t sector = 0;

	while (!kthread_should_stop()) {
		bool super = false;
		bool logging_enabled;
		struct pending_block *block = NULL;
		int ret;

		spin_lock_irq(&lc->blocks_lock);
		if (!list_empty(&lc->logging_blocks)) {
			block = list_first_entry(&lc->logging_blocks,
						 struct pending_block, list);
			list_del_init(&block->list);
			if (!lc->logging_enabled)
				goto next;

			sector = lc->next_sector;
			if (!(block->flags & LOG_DISCARD_FLAG))
				lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
			lc->next_sector += dev_to_bio_sectors(lc, 1);

			/*
			 * Apparently the size of the device may not be known
			 * right away, so handle this properly.
			 */
			if (!lc->end_sector)
				lc->end_sector = logdev_last_sector(lc);
			if (lc->end_sector &&
			    lc->next_sector >= lc->end_sector) {
				DMERR("Ran out of space on the logdev");
				lc->logging_enabled = false;
				goto next;
			}
			lc->logged_entries++;
			atomic_inc(&lc->io_blocks);

			super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
			if (super)
				atomic_inc(&lc->io_blocks);
		}
next:
		logging_enabled = lc->logging_enabled;
		spin_unlock_irq(&lc->blocks_lock);
		if (block) {
			if (logging_enabled) {
				ret = log_one_block(lc, block, sector);
				if (!ret && super)
					ret = log_super(lc);
				if (ret) {
					spin_lock_irq(&lc->blocks_lock);
					lc->logging_enabled = false;
					spin_unlock_irq(&lc->blocks_lock);
				}
			} else
				free_pending_block(lc, block);
			continue;
		}

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    list_empty(&lc->logging_blocks))
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
	return 0;
}

/*
 * Construct a log-writes mapping:
 * log-writes <dev_path> <log_dev_path>
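 *
 * For example (device paths are illustrative; the table is the usual
 * "<start> <len> <target> <args>" form):
 *
 * dmsetup create log --table "0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"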
 */
static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct log_writes_c *lc;
	struct dm_arg_set as;
	const char *devname, *logdevname;
	int ret;

	as.argc = argc;
	as.argv = argv;

	if (argc < 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
	if (!lc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	spin_lock_init(&lc->blocks_lock);
	INIT_LIST_HEAD(&lc->unflushed_blocks);
	INIT_LIST_HEAD(&lc->logging_blocks);
	init_waitqueue_head(&lc->wait);
	init_completion(&lc->super_done);
	atomic_set(&lc->io_blocks, 0);
	atomic_set(&lc->pending_blocks, 0);

	devname = dm_shift_arg(&as);
	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	logdevname = dm_shift_arg(&as);
	ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
			    &lc->logdev);
	if (ret) {
		ti->error = "Log device lookup failed";
		dm_put_device(ti, lc->dev);
		goto bad;
	}

	lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
	lc->sectorshift = ilog2(lc->sectorsize);
	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
	if (IS_ERR(lc->log_kthread)) {
		ret = PTR_ERR(lc->log_kthread);
		ti->error = "Couldn't alloc kthread";
		dm_put_device(ti, lc->dev);
		dm_put_device(ti, lc->logdev);
		goto bad;
	}

	/*
	 * next_sector is in 512b sectors to correspond to what bi_sector expects.
	 * The super starts at sector 0, and the next_sector is the next logical
	 * one based on the sectorsize of the device.
	 */
	lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
	lc->logging_enabled = true;
	lc->end_sector = logdev_last_sector(lc);
	lc->device_supports_discard = true;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return ret;
}

static int log_mark(struct log_writes_c *lc, char *data)
{
	struct pending_block *block;
	size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
	if (!block) {
		DMERR("Error allocating pending block");
		return -ENOMEM;
	}

	block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
	if (!block->data) {
		DMERR("Error copying mark data");
		kfree(block);
		return -ENOMEM;
	}
	atomic_inc(&lc->pending_blocks);
	block->datalen = strlen(block->data);
	block->flags |= LOG_MARK_FLAG;
	spin_lock_irq(&lc->blocks_lock);
	list_add_tail(&block->list, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);
	wake_up_process(lc->log_kthread);
	return 0;
}

static void log_writes_dtr(struct dm_target *ti)
{
	struct log_writes_c *lc = ti->private;

	spin_lock_irq(&lc->blocks_lock);
	list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);

	/*
	 * This is just nice to have since it'll update the super to include the
	 * unflushed blocks, if it fails we don't really care.
	 */
	log_mark(lc, "dm-log-writes-end");
	wake_up_process(lc->log_kthread);
	wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
			     !atomic_read(&lc->pending_blocks));
	kthread_stop(lc->log_kthread);

	WARN_ON(!list_empty(&lc->logging_blocks));
	WARN_ON(!list_empty(&lc->unflushed_blocks));
	dm_put_device(ti, lc->dev);
	dm_put_device(ti, lc->logdev);
	kfree(lc);
}

static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
}

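/*
 * Remap the bio to the target device and, for writes we care about, stash a
 * pending_block with a private copy of the data so the completion handler
 * can queue it for logging.
 */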
static int log_writes_map(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	struct pending_block *block;
	struct bvec_iter iter;
	struct bio_vec bv;
	size_t alloc_size;
	int i = 0;
	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
	bool fua_bio = (bio->bi_opf & REQ_FUA);
	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
	bool meta_bio = (bio->bi_opf & REQ_META);

	pb->block = NULL;

	/* Don't bother doing anything if logging has been disabled */
	if (!lc->logging_enabled)
		goto map_bio;

	/*
	 * Map reads as normal.
	 */
	if (bio_data_dir(bio) == READ)
		goto map_bio;

	/* No sectors and not a flush? Don't care */
	if (!bio_sectors(bio) && !flush_bio)
		goto map_bio;

	/*
	 * Discards will have bi_size set but there's no actual data, so just
	 * allocate the size of the pending block.
	 */
	if (discard_bio)
		alloc_size = sizeof(struct pending_block);
	else
		alloc_size = struct_size(block, vecs, bio_segments(bio));

	block = kzalloc(alloc_size, GFP_NOIO);
	if (!block) {
		DMERR("Error allocating pending block");
		spin_lock_irq(&lc->blocks_lock);
		lc->logging_enabled = false;
		spin_unlock_irq(&lc->blocks_lock);
		return DM_MAPIO_KILL;
	}
	INIT_LIST_HEAD(&block->list);
	pb->block = block;
	atomic_inc(&lc->pending_blocks);

	if (flush_bio)
		block->flags |= LOG_FLUSH_FLAG;
	if (fua_bio)
		block->flags |= LOG_FUA_FLAG;
	if (discard_bio)
		block->flags |= LOG_DISCARD_FLAG;
	if (meta_bio)
		block->flags |= LOG_METADATA_FLAG;

	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));

	/* We don't need the data, just submit */
	if (discard_bio) {
		WARN_ON(flush_bio || fua_bio);
		if (lc->device_supports_discard)
			goto map_bio;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	/* Flush bio, splice the unflushed blocks onto this list and submit */
	if (flush_bio && !bio_sectors(bio)) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
		goto map_bio;
	}

	/*
	 * We will write this bio somewhere else way later so we need to copy
	 * the actual contents into new pages so we know the data will always be
	 * there.
	 *
	 * We do this because this could be a bio from O_DIRECT in which case we
	 * can't just hold onto the page until some later point, we have to
	 * manually copy the contents.
	 */
	bio_for_each_segment(bv, bio, iter) {
		struct page *page;
		void *dst;

		page = alloc_page(GFP_NOIO);
		if (!page) {
			DMERR("Error allocating page");
			free_pending_block(lc, block);
			spin_lock_irq(&lc->blocks_lock);
			lc->logging_enabled = false;
			spin_unlock_irq(&lc->blocks_lock);
			return DM_MAPIO_KILL;
		}

		dst = kmap_atomic(page);
		memcpy_from_bvec(dst, &bv);
		kunmap_atomic(dst);
		block->vecs[i].bv_page = page;
		block->vecs[i].bv_len = bv.bv_len;
		block->vec_cnt++;
		i++;
	}

	/* Had a flush with data in it, weird */
	if (flush_bio) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
	}
map_bio:
	normal_map_bio(ti, bio);
	return DM_MAPIO_REMAPPED;
}

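/*
 * Completed writes are only queued for logging once they are on the target
 * device: a FLUSH completion releases every block spliced onto it, a FUA
 * write is logged as soon as it completes, and everything else sits on
 * unflushed_blocks until the next flush.
 */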
static int normal_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (bio_data_dir(bio) == WRITE && pb->block) {
		struct pending_block *block = pb->block;
		unsigned long flags;

		spin_lock_irqsave(&lc->blocks_lock, flags);
		if (block->flags & LOG_FLUSH_FLAG) {
			list_splice_tail_init(&block->list, &lc->logging_blocks);
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else if (block->flags & LOG_FUA_FLAG) {
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else
			list_add_tail(&block->list, &lc->unflushed_blocks);
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	return DM_ENDIO_DONE;
}

/*
 * INFO format: <logged entries> <highest allocated sector>
 */
static void log_writes_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result,
			      unsigned maxlen)
{
	unsigned sz = 0;
	struct log_writes_c *lc = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu", lc->logged_entries,
		       (unsigned long long)lc->next_sector - 1);
		if (!lc->logging_enabled)
			DMEMIT(" logging_disabled");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}

static int log_writes_prepare_ioctl(struct dm_target *ti,
				    struct block_device **bdev)
{
	struct log_writes_c *lc = ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;
	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (ti->len != bdev_nr_sectors(dev->bdev))
		return 1;
	return 0;
}

static int log_writes_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data)
{
	struct log_writes_c *lc = ti->private;

	return fn(ti, lc->dev, 0, ti->len, data);
}

/*
 * Messages supported:
 *   mark <mark data> - specify the marked data.
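 *
 * For example: dmsetup message <dm-device-name> 0 mark mymark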
 */
static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct log_writes_c *lc = ti->private;

	if (argc != 2) {
		DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
		return r;
	}

	if (!strcasecmp(argv[0], "mark"))
		r = log_mark(lc, argv[1]);
	else
		DMWARN("Unrecognised log writes target message received: %s", argv[0]);

	return r;
}

static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct log_writes_c *lc = ti->private;

	if (!bdev_max_discard_sectors(lc->dev->bdev)) {
		lc->device_supports_discard = false;
		limits->discard_granularity = lc->sectorsize;
		limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
	}
	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
	limits->io_min = limits->physical_block_size;
	limits->dma_alignment = limits->logical_block_size - 1;
}

#if IS_ENABLED(CONFIG_FS_DAX)
static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
		pgoff_t *pgoff)
{
	struct log_writes_c *lc = ti->private;

	*pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
	return lc->dev->dax_dev;
}

static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);

	return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}

static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
					  size_t nr_pages)
{
	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);

	return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}

static size_t log_writes_dax_recovery_write(struct dm_target *ti,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);

	return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
}

#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_zero_page_range NULL
#define log_writes_dax_recovery_write NULL
#endif

static struct target_type log_writes_target = {
	.name   = "log-writes",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr    = log_writes_ctr,
	.dtr    = log_writes_dtr,
	.map    = log_writes_map,
	.end_io = normal_end_io,
	.status = log_writes_status,
	.prepare_ioctl = log_writes_prepare_ioctl,
	.message = log_writes_message,
	.iterate_devices = log_writes_iterate_devices,
	.io_hints = log_writes_io_hints,
	.direct_access = log_writes_dax_direct_access,
	.dax_zero_page_range = log_writes_dax_zero_page_range,
	.dax_recovery_write = log_writes_dax_recovery_write,
};

static int __init dm_log_writes_init(void)
{
	int r = dm_register_target(&log_writes_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_log_writes_exit(void)
{
	dm_unregister_target(&log_writes_target);
}

module_init(dm_log_writes_init);
module_exit(dm_log_writes_exit);

MODULE_DESCRIPTION(DM_NAME " log writes target");
MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
MODULE_LICENSE("GPL");