// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif
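
/*
 * pmem_assign() is the only way metadata is updated in pmem mode: the
 * value is copied through a local temporary with memcpy_flushcache() so
 * the store leaves the CPU cache immediately; writecache_commit_flushed()
 * then orders it with wmb().  A minimal usage sketch (taken from
 * writecache_flush() below):
 *
 *	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
 *
 * In SSD mode the macro degenerates to a plain assignment and
 * persistence is handled by the dirty bitmap and dm-io instead.
 */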

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};
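
/*
 * On-media layout: the superblock is padded to 8 * sizeof(__le64) = 64
 * bytes by the union and is immediately followed by one wc_memory_entry
 * per cache block (entries[0] is the zero-length-array idiom for a
 * flexible array member).  Illustrative overhead: with 4096-byte blocks
 * the metadata costs 16 bytes per block, i.e. roughly 0.4% of the cache
 * device, rounded up to whole blocks.
 */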

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};
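
/*
 * On 64-bit machines write_in_progress and index are packed into a
 * single word as 1-bit and 47-bit bitfields; the entry array covers
 * every cache block, so keeping wc_entry small matters.  When
 * DM_WRITECACHE_HANDLE_HARDWARE_ERRORS is defined, original_sector and
 * seq_count shadow the persistent metadata in ordinary RAM so that
 * lookups never read from (possibly failing) persistent memory.
 */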

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
#define WC_MODE_SORT_FREELIST(wc)		(!WC_MODE_PMEM(wc))

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					    "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)				\
do {									\
	if (!cmpxchg(&(wc)->error, 0, err))				\
		DMERR(msg, ##arg);					\
	wake_up(&(wc)->freelist_wait);					\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}
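
/*
 * In SSD mode every metadata update marks the containing
 * BITMAP_GRANULARITY-sized chunk dirty.  Illustrative arithmetic: with
 * the default 64 KiB granularity, a metadata pointer at byte offset
 * 200000 from memory_map sets bit 200000 / 65536 = 3, and
 * ssd_commit_flushed() later writes the whole chunk back to the SSD.
 */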

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

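/*
 * Persist all dirty metadata chunks: scan the dirty bitmap for runs of
 * set bits, issue one asynchronous dm-io write per run, wait for them
 * all via the io_notify refcount, then flush the SSD and clear the
 * bitmap.  endio.count starts at 1; that extra reference is dropped by
 * the writecache_notify_io(0, &endio) call at the bottom, so the
 * completion cannot fire before all writes have been submitted.
 */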
static void ssd_commit_flushed(struct dm_writecache *wc)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}
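
/*
 * Lookup semantics: with flags == 0, return an entry matching `block'
 * or NULL.  WFE_RETURN_FOLLOWING additionally returns the next higher
 * entry on a miss (used by writecache_discard() to find the first entry
 * in a range).  When duplicates exist (an old committed copy plus a
 * newer one), the second loop walks to one end of the run:
 * WFE_LOWEST_SEQ selects the oldest copy, otherwise the newest wins.
 */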

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}
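
/*
 * In SSD mode (WC_MODE_SORT_FREELIST) free entries are kept in an
 * rbtree ordered by the entry's address, which is the same as cache
 * block order, so blocks are handed out roughly in ascending order and
 * writes to the cache device stay mostly sequential.  In pmem mode the
 * ordering is irrelevant and a plain list is cheaper.
 */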

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		list_del(&e->lru);
	}
	wc->freelist_size--;
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc);

	if (!WC_MODE_PMEM(wc))
		writecache_wait_for_ios(wc, WRITE);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc);
}
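
/*
 * Commit protocol used above: (1) flush the metadata (and, on pmem, the
 * data) of every uncommitted entry on the LRU list; (2) commit with
 * writecache_commit_flushed() - wmb() on pmem, bitmap writeback plus a
 * disk flush on SSD; (3) increment the superblock seq_count and commit
 * again.  An entry counts as committed iff its seq_count is below the
 * superblock's, so a crash before step (3) leaves the whole batch
 * uncommitted and it is simply discarded by writecache_resume().  The
 * final loop frees older committed copies of the same sectors.
 */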

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
	}

	return 0;
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc))
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc);
	}

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}
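
/*
 * Both messages go through the usual device-mapper message interface,
 * e.g. (the sector argument is ignored):
 *
 *	dmsetup message <writecache-dev> 0 flush
 *	dmsetup message <writecache-dev> 0 flush_on_suspend
 *
 * "flush" commits and writes back all cached data before returning and
 * fails with -EBUSY on a suspended device; "flush_on_suspend" merely
 * arms writeback for the next suspend.
 */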

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}
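
/*
 * Data path for pmem mode.  Reads copy from persistent memory with
 * memcpy_mcsafe() so a machine-check (media) error is turned into -EIO
 * on the bio instead of an unrecoverable fault; writes use
 * memcpy_flushcache() so the data does not linger in the CPU cache.
 */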

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}
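
/*
 * Summary of the decisions above: FLUSH is handled inline in pmem mode
 * and offloaded to the flush thread in SSD mode (it needs I/O, and
 * map() must not block on it); DISCARD likewise, except the pmem case
 * then remaps to the origin.  READs are served from the cache on a hit
 * and remapped to the origin on a miss, trimmed at the next cached
 * block.  WRITEs always go to the cache, reusing an entry for the same
 * sector where permitted or allocating from the freelist (waiting on it
 * when empty).
 */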

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}
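
/*
 * Sizing, with illustrative numbers: each cached block costs block_size
 * bytes of data plus a 16-byte wc_memory_entry, so for 4096-byte blocks
 * the initial estimate divides the device by 4112 and the loop then
 * decrements n_blocks until superblock + entries + data fit, giving a
 * metadata overhead of roughly 0.4%.  The e.index round-trip at the end
 * rejects devices so large that n_blocks would overflow the 47-bit
 * index bitfield in struct wc_entry.
 */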

static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++)
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc);

	return 0;
}
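
/*
 * Ordering matters here: every field is written and committed before
 * the magic number, so a crash in the middle of initialization leaves a
 * superblock that fails the magic check when the target is next loaded;
 * partially initialized metadata is never trusted.
 */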
1775
1776static void writecache_dtr(struct dm_target *ti)
1777{
1778	struct dm_writecache *wc = ti->private;
1779
1780	if (!wc)
1781		return;
1782
1783	if (wc->endio_thread)
1784		kthread_stop(wc->endio_thread);
1785
1786	if (wc->flush_thread)
1787		kthread_stop(wc->flush_thread);
1788
1789	bioset_exit(&wc->bio_set);
1790
1791	mempool_exit(&wc->copy_pool);
1792
1793	if (wc->writeback_wq)
1794		destroy_workqueue(wc->writeback_wq);
1795
1796	if (wc->dev)
1797		dm_put_device(ti, wc->dev);
1798
1799	if (wc->ssd_dev)
1800		dm_put_device(ti, wc->ssd_dev);
1801
1802	if (wc->entries)
1803		vfree(wc->entries);
1804
1805	if (wc->memory_map) {
1806		if (WC_MODE_PMEM(wc))
1807			persistent_memory_release(wc);
1808		else
1809			vfree(wc->memory_map);
1810	}
1811
1812	if (wc->dm_kcopyd)
1813		dm_kcopyd_client_destroy(wc->dm_kcopyd);
1814
1815	if (wc->dm_io)
1816		dm_io_client_destroy(wc->dm_io);
1817
1818	if (wc->dirty_bitmap)
1819		vfree(wc->dirty_bitmap);
1820
1821	kfree(wc);
1822}
1823
1824static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
1825{
1826	struct dm_writecache *wc;
1827	struct dm_arg_set as;
1828	const char *string;
1829	unsigned opt_params;
1830	size_t offset, data_size;
1831	int i, r;
1832	char dummy;
1833	int high_wm_percent = HIGH_WATERMARK;
1834	int low_wm_percent = LOW_WATERMARK;
1835	uint64_t x;
1836	struct wc_memory_superblock s;
1837
1838	static struct dm_arg _args[] = {
1839		{0, 10, "Invalid number of feature args"},
1840	};
1841
1842	as.argc = argc;
1843	as.argv = argv;
1844
1845	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
1846	if (!wc) {
1847		ti->error = "Cannot allocate writecache structure";
1848		r = -ENOMEM;
1849		goto bad;
1850	}
1851	ti->private = wc;
1852	wc->ti = ti;
1853
1854	mutex_init(&wc->lock);
1855	writecache_poison_lists(wc);
1856	init_waitqueue_head(&wc->freelist_wait);
1857	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
1858
1859	for (i = 0; i < 2; i++) {
1860		atomic_set(&wc->bio_in_progress[i], 0);
1861		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
1862	}
1863
1864	wc->dm_io = dm_io_client_create();
1865	if (IS_ERR(wc->dm_io)) {
1866		r = PTR_ERR(wc->dm_io);
1867		ti->error = "Unable to allocate dm-io client";
1868		wc->dm_io = NULL;
1869		goto bad;
1870	}
1871
1872	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
1873	if (!wc->writeback_wq) {
1874		r = -ENOMEM;
1875		ti->error = "Could not allocate writeback workqueue";
1876		goto bad;
1877	}
1878	INIT_WORK(&wc->writeback_work, writecache_writeback);
1879	INIT_WORK(&wc->flush_work, writecache_flush_work);
1880
1881	raw_spin_lock_init(&wc->endio_list_lock);
1882	INIT_LIST_HEAD(&wc->endio_list);
1883	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
1884	if (IS_ERR(wc->endio_thread)) {
1885		r = PTR_ERR(wc->endio_thread);
1886		wc->endio_thread = NULL;
1887		ti->error = "Couldn't spawn endio thread";
1888		goto bad;
1889	}
1890	wake_up_process(wc->endio_thread);
1891
1892	/*
1893	 * Parse the mode (pmem or ssd)
1894	 */
1895	string = dm_shift_arg(&as);
1896	if (!string)
1897		goto bad_arguments;
1898
1899	if (!strcasecmp(string, "s")) {
1900		wc->pmem_mode = false;
1901	} else if (!strcasecmp(string, "p")) {
1902#ifdef DM_WRITECACHE_HAS_PMEM
1903		wc->pmem_mode = true;
1904		wc->writeback_fua = true;
1905#else
1906		/*
1907		 * If the architecture doesn't support persistent memory or
1908		 * the kernel doesn't support any DAX drivers, this driver can
1909		 * only be used in SSD-only mode.
1910		 */
1911		r = -EOPNOTSUPP;
1912		ti->error = "Persistent memory or DAX not supported on this system";
1913		goto bad;
1914#endif
1915	} else {
1916		goto bad_arguments;
1917	}
1918
1919	if (WC_MODE_PMEM(wc)) {
1920		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
1921				offsetof(struct writeback_struct, bio),
1922				BIOSET_NEED_BVECS);
1923		if (r) {
1924			ti->error = "Could not allocate bio set";
1925			goto bad;
1926		}
1927	} else {
1928		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
1929		if (r) {
1930			ti->error = "Could not allocate mempool";
1931			goto bad;
1932		}
1933	}
1934
1935	/*
1936	 * Parse the origin data device
1937	 */
1938	string = dm_shift_arg(&as);
1939	if (!string)
1940		goto bad_arguments;
1941	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
1942	if (r) {
1943		ti->error = "Origin data device lookup failed";
1944		goto bad;
1945	}
1946
1947	/*
1948	 * Parse cache data device (be it pmem or ssd)
1949	 */
1950	string = dm_shift_arg(&as);
1951	if (!string)
1952		goto bad_arguments;
1953
1954	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
1955	if (r) {
1956		ti->error = "Cache data device lookup failed";
1957		goto bad;
1958	}
1959	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
1960
1961	/*
1962	 * Parse the cache block size
1963	 */
1964	string = dm_shift_arg(&as);
1965	if (!string)
1966		goto bad_arguments;
1967	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
1968	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
1969	    (wc->block_size & (wc->block_size - 1))) {
1970		r = -EINVAL;
1971		ti->error = "Invalid block size";
1972		goto bad;
1973	}
1974	wc->block_size_bits = __ffs(wc->block_size);
1975
1976	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
1977	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
1978	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
1979
1980	/*
1981	 * Parse optional arguments
1982	 */
1983	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1984	if (r)
1985		goto bad;
1986
	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

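	/*
	 * Writeback kicks in when the used part of the cache reaches the
	 * high watermark and proceeds until usage falls back to the low
	 * one, so the two must be ordered.
	 */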
	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		struct dm_io_region region;
		struct dm_io_request req;
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

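		/*
		 * In SSD-only mode, incoming flush and discard bios are queued
		 * on flush_list and handled by a dedicated kernel thread.
		 */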
		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

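		/*
		 * Work out how the cache device splits into metadata and data
		 * blocks for the configured block size.
		 */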
		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

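		/*
		 * The dirty bitmap tracks which BITMAP_GRANULARITY-sized
		 * chunks of the in-core metadata differ from the copy on the
		 * SSD, so that commits write back only what changed.
		 */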
		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

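		/*
		 * Read the on-disk metadata into the vmalloc'ed shadow copy
		 * with a synchronous dm-io request.
		 */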
		region.bdev = wc->ssd_dev->bdev;
		region.sector = wc->start_sector;
		region.count = wc->metadata_sectors;
		req.bi_op = REQ_OP_READ;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map;
		req.client = wc->dm_io;
		req.notify.fn = NULL;

		r = dm_io(&req, 1, &region, NULL);
		if (r) {
			ti->error = "Unable to read metadata";
			goto bad;
		}
	}

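	/*
	 * Copy the superblock out with memcpy_mcsafe() so that a hardware
	 * memory error on persistent memory is reported as a failure rather
	 * than crashing the machine; kernels without MCE-safe memcpy fall
	 * back to a plain copy that always succeeds.
	 */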
	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
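
	/*
	 * An all-zero magic and version means the cache device has never
	 * been formatted: initialize fresh metadata and re-read the
	 * superblock before validating it.
	 */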
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

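	/*
	 * Compute the byte offset of the first data block. Every step is
	 * checked for arithmetic overflow, because n_blocks comes from
	 * on-disk metadata and cannot be trusted.
	 */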
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

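	/*
	 * Convert the watermark percentages of used cache into free-list
	 * block counts, rounding to the nearest block:
	 * watermark = n_blocks * (100 - percent) / 100
	 */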
	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

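	/* Advertise flush and discard support to the device-mapper core. */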
	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

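	/*
	 * STATUSTYPE_INFO reports <error> <total blocks> <free blocks>
	 * <blocks under writeback>; STATUSTYPE_TABLE reconstructs the
	 * constructor arguments, emitting only the optional parameters
	 * that were given explicitly.
	 */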
	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
				wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
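		/*
		 * The original percentages are not stored, so invert the
		 * rounded watermark computation from the constructor:
		 * percent = 100 - watermark * 100 / n_blocks
		 */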
		if (wc->high_wm_percent_set) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 1, 1},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");