fs/dax.c (v4.10.11)
   1/*
   2 * fs/dax.c - Direct Access filesystem code
   3 * Copyright (c) 2013-2014 Intel Corporation
   4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 */
  16
  17#include <linux/atomic.h>
  18#include <linux/blkdev.h>
  19#include <linux/buffer_head.h>
  20#include <linux/dax.h>
  21#include <linux/fs.h>
  22#include <linux/genhd.h>
  23#include <linux/highmem.h>
  24#include <linux/memcontrol.h>
  25#include <linux/mm.h>
  26#include <linux/mutex.h>
  27#include <linux/pagevec.h>
  28#include <linux/pmem.h>
  29#include <linux/sched.h>
  30#include <linux/uio.h>
  31#include <linux/vmstat.h>
  32#include <linux/pfn_t.h>
  33#include <linux/sizes.h>
  34#include <linux/mmu_notifier.h>
  35#include <linux/iomap.h>
  36#include "internal.h"
  37
  38/* We choose 4096 entries - same as per-zone page wait tables */
  39#define DAX_WAIT_TABLE_BITS 12
  40#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  41
  42static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  43
  44static int __init init_dax_wait_table(void)
  45{
  46	int i;
  47
  48	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  49		init_waitqueue_head(wait_table + i);
  50	return 0;
  51}
  52fs_initcall(init_dax_wait_table);
  53
  54static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
  55{
  56	struct request_queue *q = bdev->bd_queue;
  57	long rc = -EIO;
  58
  59	dax->addr = ERR_PTR(-EIO);
  60	if (blk_queue_enter(q, true) != 0)
  61		return rc;
  62
  63	rc = bdev_direct_access(bdev, dax);
  64	if (rc < 0) {
  65		dax->addr = ERR_PTR(rc);
  66		blk_queue_exit(q);
  67		return rc;
  68	}
  69	return rc;
  70}
  71
  72static void dax_unmap_atomic(struct block_device *bdev,
  73		const struct blk_dax_ctl *dax)
  74{
  75	if (IS_ERR(dax->addr))
  76		return;
  77	blk_queue_exit(bdev->bd_queue);
  78}
  79
  80static int dax_is_pmd_entry(void *entry)
  81{
  82	return (unsigned long)entry & RADIX_DAX_PMD;
  83}
  84
  85static int dax_is_pte_entry(void *entry)
  86{
  87	return !((unsigned long)entry & RADIX_DAX_PMD);
  88}
  89
  90static int dax_is_zero_entry(void *entry)
  91{
  92	return (unsigned long)entry & RADIX_DAX_HZP;
  93}
  94
  95static int dax_is_empty_entry(void *entry)
  96{
  97	return (unsigned long)entry & RADIX_DAX_EMPTY;
  98}
  99
 100struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 101{
 102	struct page *page = alloc_pages(GFP_KERNEL, 0);
 103	struct blk_dax_ctl dax = {
 104		.size = PAGE_SIZE,
 105		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
 106	};
 107	long rc;
 108
 109	if (!page)
 110		return ERR_PTR(-ENOMEM);
 111
 112	rc = dax_map_atomic(bdev, &dax);
 113	if (rc < 0)
 114		return ERR_PTR(rc);
 115	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
 116	dax_unmap_atomic(bdev, &dax);
 117	return page;
 118}
 119
 120/*
 121 * DAX radix tree locking
 122 */
 123struct exceptional_entry_key {
 124	struct address_space *mapping;
 125	pgoff_t entry_start;
 126};
 127
 128struct wait_exceptional_entry_queue {
 129	wait_queue_t wait;
 130	struct exceptional_entry_key key;
 131};
 132
 133static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 134		pgoff_t index, void *entry, struct exceptional_entry_key *key)
 135{
 136	unsigned long hash;
 137
 138	/*
 139	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 140	 * queue to the start of that PMD.  This ensures that all offsets in
 141	 * the range covered by the PMD map to the same bit lock.
 142	 */
 143	if (dax_is_pmd_entry(entry))
 144		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
 145
 146	key->mapping = mapping;
 147	key->entry_start = index;
 148
 149	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
 150	return wait_table + hash;
 151}
 152
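/*
 * Editor's illustration (not part of the original source): with 4k pages
 * and 2MiB PMDs, PMD_SHIFT - PAGE_SHIFT == 9, so the alignment above is
 * effectively index &= ~511UL.  A worked example, assuming those sizes:
 *
 *	0x203 & ~511UL == 0x200
 *	0x3ff & ~511UL == 0x200
 *
 * All 512 page offsets inside one PMD thus share one entry_start, hash to
 * the same wait queue, and contend on the same bit lock.
 */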
 153static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
 154				       int sync, void *keyp)
 155{
 156	struct exceptional_entry_key *key = keyp;
 157	struct wait_exceptional_entry_queue *ewait =
 158		container_of(wait, struct wait_exceptional_entry_queue, wait);
 159
 160	if (key->mapping != ewait->key.mapping ||
 161	    key->entry_start != ewait->key.entry_start)
 162		return 0;
 163	return autoremove_wake_function(wait, mode, sync, NULL);
 164}
 165
 166/*
 167 * Check whether the given slot is locked. The function must be called with
 168 * mapping->tree_lock held
 169 */
 170static inline int slot_locked(struct address_space *mapping, void **slot)
 171{
 172	unsigned long entry = (unsigned long)
 173		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
 174	return entry & RADIX_DAX_ENTRY_LOCK;
 175}
 176
 177/*
  178 * Mark the given slot as locked. The function must be called with
 179 * mapping->tree_lock held
 180 */
 181static inline void *lock_slot(struct address_space *mapping, void **slot)
 182{
 183	unsigned long entry = (unsigned long)
 184		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
 185
 186	entry |= RADIX_DAX_ENTRY_LOCK;
 187	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
 188	return (void *)entry;
 189}
 190
 191/*
  192 * Mark the given slot as unlocked. The function must be called with
 193 * mapping->tree_lock held
 194 */
 195static inline void *unlock_slot(struct address_space *mapping, void **slot)
 196{
 197	unsigned long entry = (unsigned long)
 198		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
 199
 200	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
 201	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
 202	return (void *)entry;
 203}
 204
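/*
 * Editor's sketch (illustrative only): the entry's lock is simply the
 * RADIX_DAX_ENTRY_LOCK tag bit inside the stored value, so lock_slot()
 * and unlock_slot() boil down to bit operations done under tree_lock:
 *
 *	entry |=  RADIX_DAX_ENTRY_LOCK;				(lock)
 *	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;		(unlock)
 *
 * Because the stored value itself changes, both helpers must write the
 * slot back with radix_tree_replace_slot() rather than flip an external
 * flag.
 */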
 205/*
  206 * Look up an entry in the radix tree; if it is an exceptional entry,
  207 * wait for it to become unlocked and return it. The caller must call
  208 * put_unlocked_mapping_entry() when it decides not to lock the entry,
  209 * or put_locked_mapping_entry() when it has locked the entry and later
  210 * wants to unlock it.
 211 *
 212 * The function must be called with mapping->tree_lock held.
 213 */
 214static void *get_unlocked_mapping_entry(struct address_space *mapping,
 215					pgoff_t index, void ***slotp)
 216{
 217	void *entry, **slot;
 218	struct wait_exceptional_entry_queue ewait;
 219	wait_queue_head_t *wq;
 220
 221	init_wait(&ewait.wait);
 222	ewait.wait.func = wake_exceptional_entry_func;
 223
 224	for (;;) {
 225		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
 226					  &slot);
 227		if (!entry || !radix_tree_exceptional_entry(entry) ||
 228		    !slot_locked(mapping, slot)) {
 229			if (slotp)
 230				*slotp = slot;
 231			return entry;
 232		}
 233
 234		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 235		prepare_to_wait_exclusive(wq, &ewait.wait,
 236					  TASK_UNINTERRUPTIBLE);
 237		spin_unlock_irq(&mapping->tree_lock);
 238		schedule();
 239		finish_wait(wq, &ewait.wait);
 240		spin_lock_irq(&mapping->tree_lock);
 241	}
 242}
 243
 244static void dax_unlock_mapping_entry(struct address_space *mapping,
 245				     pgoff_t index)
 246{
 247	void *entry, **slot;
 248
 249	spin_lock_irq(&mapping->tree_lock);
 250	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
 251	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 252			 !slot_locked(mapping, slot))) {
 253		spin_unlock_irq(&mapping->tree_lock);
 254		return;
 255	}
 256	unlock_slot(mapping, slot);
 257	spin_unlock_irq(&mapping->tree_lock);
 258	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 259}
 260
 261static void put_locked_mapping_entry(struct address_space *mapping,
 262				     pgoff_t index, void *entry)
 263{
 264	if (!radix_tree_exceptional_entry(entry)) {
 265		unlock_page(entry);
 266		put_page(entry);
 267	} else {
 268		dax_unlock_mapping_entry(mapping, index);
 269	}
 270}
 271
 272/*
  273 * Called when we are done with a radix tree entry we looked up via
 274 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 275 */
 276static void put_unlocked_mapping_entry(struct address_space *mapping,
 277				       pgoff_t index, void *entry)
 278{
 279	if (!radix_tree_exceptional_entry(entry))
 280		return;
 281
 282	/* We have to wake up next waiter for the radix tree entry lock */
 283	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 284}
 285
 286/*
  287 * Find the radix tree entry at the given index. If it points to a page,
  288 * return with the page locked. If it points to an exceptional entry,
  289 * return with the radix tree entry locked. If the tree doesn't contain
  290 * the given index, create an empty exceptional entry and return it locked.
 291 *
 292 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 293 * either return that locked entry or will return an error.  This error will
 294 * happen if there are any 4k entries (either zero pages or DAX entries)
 295 * within the 2MiB range that we are requesting.
 296 *
 297 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 298 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 299 * insertion will fail if it finds any 4k entries already in the tree, and a
 300 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 301 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 302 * well as 2MiB empty entries.
 303 *
 304 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 305 * real storage backing them.  We will leave these real 2MiB DAX entries in
 306 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 307 *
 308 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 309 * persistent memory the benefit is doubtful. We can add that later if we can
 310 * show it helps.
 311 */
 312static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 313		unsigned long size_flag)
 314{
 315	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
 316	void *entry, **slot;
 317
 318restart:
 319	spin_lock_irq(&mapping->tree_lock);
 320	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 321
 322	if (entry) {
 323		if (size_flag & RADIX_DAX_PMD) {
 324			if (!radix_tree_exceptional_entry(entry) ||
 325			    dax_is_pte_entry(entry)) {
 326				put_unlocked_mapping_entry(mapping, index,
 327						entry);
 328				entry = ERR_PTR(-EEXIST);
 329				goto out_unlock;
 330			}
 331		} else { /* trying to grab a PTE entry */
 332			if (radix_tree_exceptional_entry(entry) &&
 333			    dax_is_pmd_entry(entry) &&
 334			    (dax_is_zero_entry(entry) ||
 335			     dax_is_empty_entry(entry))) {
 336				pmd_downgrade = true;
 337			}
 338		}
 339	}
 340
 341	/* No entry for given index? Make sure radix tree is big enough. */
 342	if (!entry || pmd_downgrade) {
 343		int err;
 344
 345		if (pmd_downgrade) {
 346			/*
 347			 * Make sure 'entry' remains valid while we drop
 348			 * mapping->tree_lock.
 349			 */
 350			entry = lock_slot(mapping, slot);
 351		}
 352
 353		spin_unlock_irq(&mapping->tree_lock);
 354		/*
 355		 * Besides huge zero pages the only other thing that gets
 356		 * downgraded are empty entries which don't need to be
 357		 * unmapped.
 358		 */
 359		if (pmd_downgrade && dax_is_zero_entry(entry))
 360			unmap_mapping_range(mapping,
 361				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
 362
 363		err = radix_tree_preload(
 364				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
 365		if (err) {
 366			if (pmd_downgrade)
 367				put_locked_mapping_entry(mapping, index, entry);
 368			return ERR_PTR(err);
 369		}
 370		spin_lock_irq(&mapping->tree_lock);
 371
 372		if (!entry) {
 373			/*
 374			 * We needed to drop the page_tree lock while calling
 375			 * radix_tree_preload() and we didn't have an entry to
 376			 * lock.  See if another thread inserted an entry at
 377			 * our index during this time.
 378			 */
 379			entry = __radix_tree_lookup(&mapping->page_tree, index,
 380					NULL, &slot);
 381			if (entry) {
 382				radix_tree_preload_end();
 383				spin_unlock_irq(&mapping->tree_lock);
 384				goto restart;
 385			}
 386		}
 387
 388		if (pmd_downgrade) {
 389			radix_tree_delete(&mapping->page_tree, index);
 390			mapping->nrexceptional--;
 391			dax_wake_mapping_entry_waiter(mapping, index, entry,
 392					true);
 393		}
 394
 395		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
 396
 397		err = __radix_tree_insert(&mapping->page_tree, index,
 398				dax_radix_order(entry), entry);
 399		radix_tree_preload_end();
 400		if (err) {
 401			spin_unlock_irq(&mapping->tree_lock);
 402			/*
 403			 * Our insertion of a DAX entry failed, most likely
 404			 * because we were inserting a PMD entry and it
 405			 * collided with a PTE sized entry at a different
 406			 * index in the PMD range.  We haven't inserted
 407			 * anything into the radix tree and have no waiters to
 408			 * wake.
 409			 */
 410			return ERR_PTR(err);
 411		}
 412		/* Good, we have inserted empty locked entry into the tree. */
 413		mapping->nrexceptional++;
 414		spin_unlock_irq(&mapping->tree_lock);
 415		return entry;
 416	}
 417	/* Normal page in radix tree? */
 418	if (!radix_tree_exceptional_entry(entry)) {
 419		struct page *page = entry;
 420
 421		get_page(page);
 422		spin_unlock_irq(&mapping->tree_lock);
 423		lock_page(page);
 424		/* Page got truncated? Retry... */
 425		if (unlikely(page->mapping != mapping)) {
 426			unlock_page(page);
 427			put_page(page);
 428			goto restart;
 429		}
 430		return page;
 431	}
 432	entry = lock_slot(mapping, slot);
 433 out_unlock:
 434	spin_unlock_irq(&mapping->tree_lock);
 435	return entry;
 436}
 437
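/*
 * Editor's sketch of the intended calling pattern (hypothetical caller;
 * it mirrors what dax_iomap_fault() below actually does):
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	... install or update the mapping while the entry is locked ...
 *	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 *
 * The locked entry acts as a per-index lock, serializing page faults
 * against truncate and against writeback of the same index.
 */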
 438/*
 439 * We do not necessarily hold the mapping->tree_lock when we call this
 440 * function so it is possible that 'entry' is no longer a valid item in the
 441 * radix tree.  This is okay because all we really need to do is to find the
 442 * correct waitqueue where tasks might be waiting for that old 'entry' and
 443 * wake them.
 444 */
 445void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 446		pgoff_t index, void *entry, bool wake_all)
 447{
 448	struct exceptional_entry_key key;
 449	wait_queue_head_t *wq;
 450
 451	wq = dax_entry_waitqueue(mapping, index, entry, &key);
 452
 453	/*
 454	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 455	 * under mapping->tree_lock, ditto for entry handling in our callers.
 456	 * So at this point all tasks that could have seen our entry locked
 457	 * must be in the waitqueue and the following check will see them.
 458	 */
 459	if (waitqueue_active(wq))
 460		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 461}
 462
 463static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 464					  pgoff_t index, bool trunc)
 465{
 466	int ret = 0;
 467	void *entry;
 468	struct radix_tree_root *page_tree = &mapping->page_tree;
 469
 470	spin_lock_irq(&mapping->tree_lock);
 471	entry = get_unlocked_mapping_entry(mapping, index, NULL);
 472	if (!entry || !radix_tree_exceptional_entry(entry))
 473		goto out;
 474	if (!trunc &&
 475	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
 476	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
 477		goto out;
 478	radix_tree_delete(page_tree, index);
 479	mapping->nrexceptional--;
 480	ret = 1;
 481out:
 482	put_unlocked_mapping_entry(mapping, index, entry);
 483	spin_unlock_irq(&mapping->tree_lock);
 484	return ret;
 485}
 486/*
 487 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 488 * entry to get unlocked before deleting it.
 489 */
 490int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 491{
 492	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 493
 494	/*
  495	 * This gets called from the truncate / punch_hole path. As such, the
  496	 * caller must hold locks protecting against concurrent modifications of
  497	 * the radix tree (usually the fs-private i_mmap_sem for writing). Since
  498	 * the caller has seen an exceptional entry for this index, we had better
  499	 * find it at that index as well...
 500	 */
 501	WARN_ON_ONCE(!ret);
 502	return ret;
 503}
 504
 505/*
 506 * Invalidate exceptional DAX entry if easily possible. This handles DAX
 507 * entries for invalidate_inode_pages() so we evict the entry only if we can
 508 * do so without blocking.
 509 */
 510int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
 511{
 512	int ret = 0;
 513	void *entry, **slot;
 514	struct radix_tree_root *page_tree = &mapping->page_tree;
 515
 516	spin_lock_irq(&mapping->tree_lock);
 517	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
 518	if (!entry || !radix_tree_exceptional_entry(entry) ||
 519	    slot_locked(mapping, slot))
 520		goto out;
 521	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
 522	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
 523		goto out;
 524	radix_tree_delete(page_tree, index);
 525	mapping->nrexceptional--;
 526	ret = 1;
 527out:
 528	spin_unlock_irq(&mapping->tree_lock);
 529	if (ret)
 530		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
 531	return ret;
 532}
 533
 534/*
 535 * Invalidate exceptional DAX entry if it is clean.
 536 */
 537int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 538				      pgoff_t index)
 539{
 540	return __dax_invalidate_mapping_entry(mapping, index, false);
 541}
 542
 543/*
 544 * The user has performed a load from a hole in the file.  Allocating
 545 * a new page in the file would cause excessive storage usage for
 546 * workloads with sparse files.  We allocate a page cache page instead.
 547 * We'll kick it out of the page cache if it's ever written to,
 548 * otherwise it will simply fall out of the page cache under memory
 549 * pressure without ever having been dirtied.
 550 */
 551static int dax_load_hole(struct address_space *mapping, void **entry,
 552			 struct vm_fault *vmf)
 553{
 554	struct page *page;
 555	int ret;
 556
 557	/* Hole page already exists? Return it...  */
 558	if (!radix_tree_exceptional_entry(*entry)) {
 559		page = *entry;
 560		goto out;
 561	}
 562
 563	/* This will replace locked radix tree entry with a hole page */
 564	page = find_or_create_page(mapping, vmf->pgoff,
 565				   vmf->gfp_mask | __GFP_ZERO);
 566	if (!page)
 567		return VM_FAULT_OOM;
 568 out:
 569	vmf->page = page;
 570	ret = finish_fault(vmf);
 571	vmf->page = NULL;
 572	*entry = page;
 573	if (!ret) {
 574		/* Grab reference for PTE that is now referencing the page */
 575		get_page(page);
 576		return VM_FAULT_NOPAGE;
 577	}
 578	return ret;
 579}
 580
 581static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
 582		struct page *to, unsigned long vaddr)
 583{
 584	struct blk_dax_ctl dax = {
 585		.sector = sector,
 586		.size = size,
 587	};
 588	void *vto;
 589
 590	if (dax_map_atomic(bdev, &dax) < 0)
 591		return PTR_ERR(dax.addr);
 592	vto = kmap_atomic(to);
 593	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
 594	kunmap_atomic(vto);
 595	dax_unmap_atomic(bdev, &dax);
 596	return 0;
 597}
 598
 599/*
 600 * By this point grab_mapping_entry() has ensured that we have a locked entry
 601 * of the appropriate size so we don't have to worry about downgrading PMDs to
 602 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 603 * already in the tree, we will skip the insertion and just dirty the PMD as
 604 * appropriate.
 605 */
 606static void *dax_insert_mapping_entry(struct address_space *mapping,
 607				      struct vm_fault *vmf,
 608				      void *entry, sector_t sector,
 609				      unsigned long flags)
 610{
 611	struct radix_tree_root *page_tree = &mapping->page_tree;
 612	int error = 0;
 613	bool hole_fill = false;
 614	void *new_entry;
 615	pgoff_t index = vmf->pgoff;
 616
 617	if (vmf->flags & FAULT_FLAG_WRITE)
 618		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 619
 620	/* Replacing hole page with block mapping? */
 621	if (!radix_tree_exceptional_entry(entry)) {
 622		hole_fill = true;
 623		/*
 624		 * Unmap the page now before we remove it from page cache below.
 625		 * The page is locked so it cannot be faulted in again.
 626		 */
 627		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
 628				    PAGE_SIZE, 0);
 629		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
 630		if (error)
 631			return ERR_PTR(error);
 632	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
 633		/* replacing huge zero page with PMD block mapping */
 634		unmap_mapping_range(mapping,
 635			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
 636	}
 637
 638	spin_lock_irq(&mapping->tree_lock);
 639	new_entry = dax_radix_locked_entry(sector, flags);
 640
 641	if (hole_fill) {
 642		__delete_from_page_cache(entry, NULL);
 643		/* Drop pagecache reference */
 644		put_page(entry);
 645		error = __radix_tree_insert(page_tree, index,
 646				dax_radix_order(new_entry), new_entry);
 647		if (error) {
 648			new_entry = ERR_PTR(error);
 649			goto unlock;
 650		}
 651		mapping->nrexceptional++;
 652	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 653		/*
 654		 * Only swap our new entry into the radix tree if the current
 655		 * entry is a zero page or an empty entry.  If a normal PTE or
 656		 * PMD entry is already in the tree, we leave it alone.  This
 657		 * means that if we are trying to insert a PTE and the
 658		 * existing entry is a PMD, we will just leave the PMD in the
 659		 * tree and dirty it if necessary.
 660		 */
 661		struct radix_tree_node *node;
 662		void **slot;
 663		void *ret;
 664
 665		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
 666		WARN_ON_ONCE(ret != entry);
 667		__radix_tree_replace(page_tree, node, slot,
 668				     new_entry, NULL, NULL);
 669	}
 670	if (vmf->flags & FAULT_FLAG_WRITE)
 671		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 672 unlock:
 673	spin_unlock_irq(&mapping->tree_lock);
 674	if (hole_fill) {
 675		radix_tree_preload_end();
 676		/*
  677		 * We don't need the hole page anymore; it has been replaced
  678		 * with a locked radix tree entry now.
 679		 */
 680		if (mapping->a_ops->freepage)
 681			mapping->a_ops->freepage(entry);
 682		unlock_page(entry);
 683		put_page(entry);
 684	}
 685	return new_entry;
 686}
 687
 688static inline unsigned long
 689pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 690{
 691	unsigned long address;
 692
 693	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 694	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 695	return address;
 696}
 697
 698/* Walk all mappings of a given index of a file and writeprotect them */
 699static void dax_mapping_entry_mkclean(struct address_space *mapping,
 700				      pgoff_t index, unsigned long pfn)
 701{
 702	struct vm_area_struct *vma;
 703	pte_t pte, *ptep = NULL;
 704	pmd_t *pmdp = NULL;
 705	spinlock_t *ptl;
 706	bool changed;
 707
 708	i_mmap_lock_read(mapping);
 709	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 710		unsigned long address;
 711
 712		cond_resched();
 713
 714		if (!(vma->vm_flags & VM_SHARED))
 715			continue;
 716
 717		address = pgoff_address(index, vma);
 718		changed = false;
 719		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
 720			continue;
 721
 722		if (pmdp) {
 723#ifdef CONFIG_FS_DAX_PMD
 724			pmd_t pmd;
 725
 726			if (pfn != pmd_pfn(*pmdp))
 727				goto unlock_pmd;
 728			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
 729				goto unlock_pmd;
 730
 731			flush_cache_page(vma, address, pfn);
 732			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
 733			pmd = pmd_wrprotect(pmd);
 734			pmd = pmd_mkclean(pmd);
 735			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 736			changed = true;
 737unlock_pmd:
 738			spin_unlock(ptl);
 739#endif
 740		} else {
 741			if (pfn != pte_pfn(*ptep))
 742				goto unlock_pte;
 743			if (!pte_dirty(*ptep) && !pte_write(*ptep))
 744				goto unlock_pte;
 745
 746			flush_cache_page(vma, address, pfn);
 747			pte = ptep_clear_flush(vma, address, ptep);
 748			pte = pte_wrprotect(pte);
 749			pte = pte_mkclean(pte);
 750			set_pte_at(vma->vm_mm, address, ptep, pte);
 751			changed = true;
 752unlock_pte:
 753			pte_unmap_unlock(ptep, ptl);
 754		}
 755
 756		if (changed)
 757			mmu_notifier_invalidate_page(vma->vm_mm, address);
 758	}
 759	i_mmap_unlock_read(mapping);
 760}
 761
 762static int dax_writeback_one(struct block_device *bdev,
 763		struct address_space *mapping, pgoff_t index, void *entry)
 764{
 765	struct radix_tree_root *page_tree = &mapping->page_tree;
 766	struct blk_dax_ctl dax;
 767	void *entry2, **slot;
 768	int ret = 0;
 769
 770	/*
 771	 * A page got tagged dirty in DAX mapping? Something is seriously
 772	 * wrong.
 773	 */
 774	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
 775		return -EIO;
 776
 777	spin_lock_irq(&mapping->tree_lock);
 778	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
 779	/* Entry got punched out / reallocated? */
 780	if (!entry2 || !radix_tree_exceptional_entry(entry2))
 781		goto put_unlocked;
 782	/*
 783	 * Entry got reallocated elsewhere? No need to writeback. We have to
 784	 * compare sectors as we must not bail out due to difference in lockbit
 785	 * or entry type.
 786	 */
 787	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
 788		goto put_unlocked;
 789	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 790				dax_is_zero_entry(entry))) {
 791		ret = -EIO;
 792		goto put_unlocked;
 793	}
 794
 795	/* Another fsync thread may have already written back this entry */
 796	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
 797		goto put_unlocked;
 798	/* Lock the entry to serialize with page faults */
 799	entry = lock_slot(mapping, slot);
 800	/*
 801	 * We can clear the tag now but we have to be careful so that concurrent
 802	 * dax_writeback_one() calls for the same index cannot finish before we
 803	 * actually flush the caches. This is achieved as the calls will look
 804	 * at the entry only under tree_lock and once they do that they will
 805	 * see the entry locked and wait for it to unlock.
 806	 */
 807	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
 808	spin_unlock_irq(&mapping->tree_lock);
 809
 810	/*
 811	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
 812	 * in the middle of a PMD, the 'index' we are given will be aligned to
 813	 * the start index of the PMD, as will the sector we pull from
 814	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
 815	 * worry about partial PMD writebacks.
 816	 */
 817	dax.sector = dax_radix_sector(entry);
 818	dax.size = PAGE_SIZE << dax_radix_order(entry);
 819
 820	/*
 821	 * We cannot hold tree_lock while calling dax_map_atomic() because it
 822	 * eventually calls cond_resched().
 823	 */
 824	ret = dax_map_atomic(bdev, &dax);
 825	if (ret < 0) {
 826		put_locked_mapping_entry(mapping, index, entry);
 827		return ret;
 828	}
 829
 830	if (WARN_ON_ONCE(ret < dax.size)) {
 831		ret = -EIO;
 832		goto unmap;
 833	}
 834
 835	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
 836	wb_cache_pmem(dax.addr, dax.size);
 837	/*
 838	 * After we have flushed the cache, we can clear the dirty tag. There
 839	 * cannot be new dirty data in the pfn after the flush has completed as
 840	 * the pfn mappings are writeprotected and fault waits for mapping
 841	 * entry lock.
 842	 */
 843	spin_lock_irq(&mapping->tree_lock);
 844	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
 845	spin_unlock_irq(&mapping->tree_lock);
 846 unmap:
 847	dax_unmap_atomic(bdev, &dax);
 848	put_locked_mapping_entry(mapping, index, entry);
 849	return ret;
 850
 851 put_unlocked:
 852	put_unlocked_mapping_entry(mapping, index, entry2);
 853	spin_unlock_irq(&mapping->tree_lock);
 854	return ret;
 855}
 856
 857/*
 858 * Flush the mapping to the persistent domain within the byte range of [start,
 859 * end]. This is required by data integrity operations to ensure file data is
 860 * on persistent storage prior to completion of the operation.
 861 */
 862int dax_writeback_mapping_range(struct address_space *mapping,
 863		struct block_device *bdev, struct writeback_control *wbc)
 864{
 865	struct inode *inode = mapping->host;
 866	pgoff_t start_index, end_index;
 867	pgoff_t indices[PAGEVEC_SIZE];
 868	struct pagevec pvec;
 869	bool done = false;
 870	int i, ret = 0;
 871
 872	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 873		return -EIO;
 874
 875	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 876		return 0;
 877
 878	start_index = wbc->range_start >> PAGE_SHIFT;
 879	end_index = wbc->range_end >> PAGE_SHIFT;
 880
 881	tag_pages_for_writeback(mapping, start_index, end_index);
 882
 883	pagevec_init(&pvec, 0);
 884	while (!done) {
 885		pvec.nr = find_get_entries_tag(mapping, start_index,
 886				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
 887				pvec.pages, indices);
 888
 889		if (pvec.nr == 0)
 890			break;
 891
 892		for (i = 0; i < pvec.nr; i++) {
 893			if (indices[i] > end_index) {
 894				done = true;
 895				break;
 896			}
 897
 898			ret = dax_writeback_one(bdev, mapping, indices[i],
 899					pvec.pages[i]);
 900			if (ret < 0)
 901				return ret;
 902		}
 903	}
 904	return 0;
 905}
 906EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 907
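/*
 * Editor's sketch: how a filesystem's ->writepages might route DAX inodes
 * into dax_writeback_mapping_range().  Loosely modeled on the xfs wiring
 * of this era; my_fs_find_bdev() is a hypothetical helper, not a real
 * kernel function.
 */
static int my_fs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	/* DAX inodes carry dirty radix tree entries, not dirty pages */
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				my_fs_find_bdev(mapping->host), wbc);
	return generic_writepages(mapping, wbc);
}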
 908static int dax_insert_mapping(struct address_space *mapping,
 909		struct block_device *bdev, sector_t sector, size_t size,
 910		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
 911{
 912	unsigned long vaddr = vmf->address;
 913	struct blk_dax_ctl dax = {
 914		.sector = sector,
 915		.size = size,
 916	};
 917	void *ret;
 918	void *entry = *entryp;
 919
 920	if (dax_map_atomic(bdev, &dax) < 0)
 921		return PTR_ERR(dax.addr);
 922	dax_unmap_atomic(bdev, &dax);
 923
 924	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
 925	if (IS_ERR(ret))
 926		return PTR_ERR(ret);
 927	*entryp = ret;
 928
 929	return vm_insert_mixed(vma, vaddr, dax.pfn);
 930}
 931
 932/**
 933 * dax_pfn_mkwrite - handle first write to DAX page
 934 * @vma: The virtual memory area where the fault occurred
 935 * @vmf: The description of the fault
 936 */
 937int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 938{
 939	struct file *file = vma->vm_file;
 940	struct address_space *mapping = file->f_mapping;
 941	void *entry, **slot;
 942	pgoff_t index = vmf->pgoff;
 943
 944	spin_lock_irq(&mapping->tree_lock);
 945	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 946	if (!entry || !radix_tree_exceptional_entry(entry)) {
 947		if (entry)
 948			put_unlocked_mapping_entry(mapping, index, entry);
 949		spin_unlock_irq(&mapping->tree_lock);
 950		return VM_FAULT_NOPAGE;
 951	}
 952	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
 953	entry = lock_slot(mapping, slot);
 954	spin_unlock_irq(&mapping->tree_lock);
 955	/*
 956	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
 957	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
 958	 * the fault in either case.
 959	 */
 960	finish_mkwrite_fault(vmf);
 961	put_locked_mapping_entry(mapping, index, entry);
 962	return VM_FAULT_NOPAGE;
 963}
 964EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 965
 966static bool dax_range_is_aligned(struct block_device *bdev,
 967				 unsigned int offset, unsigned int length)
 968{
 969	unsigned short sector_size = bdev_logical_block_size(bdev);
 970
 971	if (!IS_ALIGNED(offset, sector_size))
 972		return false;
 973	if (!IS_ALIGNED(length, sector_size))
 974		return false;
 975
 976	return true;
 977}
 978
 979int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 980		unsigned int offset, unsigned int length)
 981{
 982	struct blk_dax_ctl dax = {
 983		.sector		= sector,
 984		.size		= PAGE_SIZE,
 985	};
 986
 987	if (dax_range_is_aligned(bdev, offset, length)) {
 988		sector_t start_sector = dax.sector + (offset >> 9);
 989
 990		return blkdev_issue_zeroout(bdev, start_sector,
 991				length >> 9, GFP_NOFS, true);
 992	} else {
 993		if (dax_map_atomic(bdev, &dax) < 0)
 994			return PTR_ERR(dax.addr);
 995		clear_pmem(dax.addr + offset, length);
 996		dax_unmap_atomic(bdev, &dax);
 997	}
 998	return 0;
 999}
1000EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1001
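/*
 * Editor's note (illustrative): with a 512-byte logical block size, a
 * request of offset == 512, length == 1024 is sector-aligned and is
 * pushed down as blkdev_issue_zeroout() over two sectors, while
 * offset == 100, length == 200 fails dax_range_is_aligned() and falls
 * back to mapping the page and clear_pmem()ing the sub-sector range.
 */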
1002static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
1003{
1004	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
1005}
1006
1007static loff_t
1008dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1009		struct iomap *iomap)
1010{
1011	struct iov_iter *iter = data;
1012	loff_t end = pos + length, done = 0;
1013	ssize_t ret = 0;
1014
1015	if (iov_iter_rw(iter) == READ) {
1016		end = min(end, i_size_read(inode));
1017		if (pos >= end)
1018			return 0;
1019
1020		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1021			return iov_iter_zero(min(length, end - pos), iter);
1022	}
1023
1024	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1025		return -EIO;
1026
1027	/*
 1028	 * A write can allocate a block for an area which has a hole page mapped
 1029	 * into the page tables. We have to tear down these mappings so that data
1030	 * written by write(2) is visible in mmap.
1031	 */
1032	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1033		invalidate_inode_pages2_range(inode->i_mapping,
1034					      pos >> PAGE_SHIFT,
1035					      (end - 1) >> PAGE_SHIFT);
1036	}
1037
1038	while (pos < end) {
1039		unsigned offset = pos & (PAGE_SIZE - 1);
1040		struct blk_dax_ctl dax = { 0 };
1041		ssize_t map_len;
1042
1043		if (fatal_signal_pending(current)) {
1044			ret = -EINTR;
1045			break;
1046		}
1047
1048		dax.sector = dax_iomap_sector(iomap, pos);
1049		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1050		map_len = dax_map_atomic(iomap->bdev, &dax);
1051		if (map_len < 0) {
1052			ret = map_len;
1053			break;
1054		}
1055
1056		dax.addr += offset;
1057		map_len -= offset;
1058		if (map_len > end - pos)
1059			map_len = end - pos;
1060
1061		if (iov_iter_rw(iter) == WRITE)
1062			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
1063		else
1064			map_len = copy_to_iter(dax.addr, map_len, iter);
1065		dax_unmap_atomic(iomap->bdev, &dax);
1066		if (map_len <= 0) {
1067			ret = map_len ? map_len : -EFAULT;
1068			break;
1069		}
1070
1071		pos += map_len;
1072		length -= map_len;
1073		done += map_len;
1074	}
1075
1076	return done ? done : ret;
1077}
1078
1079/**
1080 * dax_iomap_rw - Perform I/O to a DAX file
1081 * @iocb:	The control block for this I/O
1082 * @iter:	The addresses to do I/O from or to
1083 * @ops:	iomap ops passed from the file system
1084 *
1085 * This function performs read and write operations to directly mapped
 1086 * persistent memory.  The caller needs to take care of read/write exclusion
1087 * and evicting any page cache pages in the region under I/O.
1088 */
1089ssize_t
1090dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1091		struct iomap_ops *ops)
1092{
1093	struct address_space *mapping = iocb->ki_filp->f_mapping;
1094	struct inode *inode = mapping->host;
1095	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1096	unsigned flags = 0;
1097
1098	if (iov_iter_rw(iter) == WRITE)
1099		flags |= IOMAP_WRITE;
1100
1101	while (iov_iter_count(iter)) {
1102		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1103				iter, dax_iomap_actor);
1104		if (ret <= 0)
1105			break;
1106		pos += ret;
1107		done += ret;
1108	}
1109
1110	iocb->ki_pos += done;
1111	return done ? done : ret;
1112}
1113EXPORT_SYMBOL_GPL(dax_iomap_rw);
1114
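/*
 * Editor's sketch of a caller (modeled on the ext4/xfs read path of this
 * era; my_fs_iomap_ops is the filesystem's own iomap_ops and is assumed
 * here, not defined in this file).  The shared inode lock provides the
 * read/write exclusion the comment above requires of callers.
 */
static ssize_t my_fs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &my_fs_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}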
1115static int dax_fault_return(int error)
1116{
1117	if (error == 0)
1118		return VM_FAULT_NOPAGE;
1119	if (error == -ENOMEM)
1120		return VM_FAULT_OOM;
1121	return VM_FAULT_SIGBUS;
1122}
1123
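/*
 * Editor's note (illustrative): dax_fault_return() folds errno values
 * into VM_FAULT_* codes: 0 becomes VM_FAULT_NOPAGE, -ENOMEM becomes
 * VM_FAULT_OOM, and anything else (-EIO, -ENOSPC, ...) becomes
 * VM_FAULT_SIGBUS.
 */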
1124/**
1125 * dax_iomap_fault - handle a page fault on a DAX file
1126 * @vma: The virtual memory area where the fault occurred
1127 * @vmf: The description of the fault
1128 * @ops: iomap ops passed from the file system
1129 *
1130 * When a page fault occurs, filesystems may call this helper in their fault
1131 * or mkwrite handler for DAX files. Assumes the caller has done all the
1132 * necessary locking for the page fault to proceed successfully.
1133 */
1134int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1135			struct iomap_ops *ops)
1136{
1137	struct address_space *mapping = vma->vm_file->f_mapping;
1138	struct inode *inode = mapping->host;
1139	unsigned long vaddr = vmf->address;
1140	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1141	sector_t sector;
1142	struct iomap iomap = { 0 };
1143	unsigned flags = IOMAP_FAULT;
1144	int error, major = 0;
1145	int vmf_ret = 0;
1146	void *entry;
1147
1148	/*
1149	 * Check whether offset isn't beyond end of file now. Caller is supposed
1150	 * to hold locks serializing us with truncate / punch hole so this is
1151	 * a reliable test.
1152	 */
1153	if (pos >= i_size_read(inode))
1154		return VM_FAULT_SIGBUS;
1155
1156	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1157		flags |= IOMAP_WRITE;
1158
1159	/*
 1160	 * Note that we don't bother to use iomap_apply here: DAX requires
 1161	 * the file system block size to equal the page size, which means
1162	 * that we never have to deal with more than a single extent here.
1163	 */
1164	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1165	if (error)
1166		return dax_fault_return(error);
1167	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1168		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
1169		goto finish_iomap;
1170	}
1171
1172	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1173	if (IS_ERR(entry)) {
1174		vmf_ret = dax_fault_return(PTR_ERR(entry));
1175		goto finish_iomap;
1176	}
1177
1178	sector = dax_iomap_sector(&iomap, pos);
1179
1180	if (vmf->cow_page) {
1181		switch (iomap.type) {
1182		case IOMAP_HOLE:
1183		case IOMAP_UNWRITTEN:
1184			clear_user_highpage(vmf->cow_page, vaddr);
1185			break;
1186		case IOMAP_MAPPED:
1187			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
1188					vmf->cow_page, vaddr);
1189			break;
1190		default:
1191			WARN_ON_ONCE(1);
1192			error = -EIO;
1193			break;
1194		}
1195
1196		if (error)
1197			goto error_unlock_entry;
1198
1199		__SetPageUptodate(vmf->cow_page);
1200		vmf_ret = finish_fault(vmf);
1201		if (!vmf_ret)
1202			vmf_ret = VM_FAULT_DONE_COW;
1203		goto unlock_entry;
1204	}
1205
1206	switch (iomap.type) {
1207	case IOMAP_MAPPED:
1208		if (iomap.flags & IOMAP_F_NEW) {
1209			count_vm_event(PGMAJFAULT);
1210			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1211			major = VM_FAULT_MAJOR;
1212		}
1213		error = dax_insert_mapping(mapping, iomap.bdev, sector,
1214				PAGE_SIZE, &entry, vma, vmf);
1215		/* -EBUSY is fine, somebody else faulted on the same PTE */
1216		if (error == -EBUSY)
1217			error = 0;
1218		break;
1219	case IOMAP_UNWRITTEN:
1220	case IOMAP_HOLE:
1221		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1222			vmf_ret = dax_load_hole(mapping, &entry, vmf);
1223			goto unlock_entry;
1224		}
1225		/*FALLTHRU*/
1226	default:
1227		WARN_ON_ONCE(1);
1228		error = -EIO;
1229		break;
1230	}
1231
1232 error_unlock_entry:
1233	vmf_ret = dax_fault_return(error) | major;
1234 unlock_entry:
1235	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1236 finish_iomap:
1237	if (ops->iomap_end) {
1238		int copied = PAGE_SIZE;
1239
1240		if (vmf_ret & VM_FAULT_ERROR)
1241			copied = 0;
1242		/*
1243		 * The fault is done by now and there's no way back (other
1244		 * thread may be already happily using PTE we have installed).
1245		 * Just ignore error from ->iomap_end since we cannot do much
1246		 * with it.
1247		 */
1248		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1249	}
1250	return vmf_ret;
1251}
1252EXPORT_SYMBOL_GPL(dax_iomap_fault);
1253
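/*
 * Editor's sketch of a fault handler built on dax_iomap_fault(), loosely
 * modeled on ext4's DAX vm_ops around this version.  MY_FS_I(), the
 * dax_sem rwsem and my_fs_iomap_ops are assumptions, and every
 * filesystem does its own truncate serialization here.
 */
static int my_fs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	down_read(&MY_FS_I(inode)->dax_sem);	/* hypothetical truncate lock */
	ret = dax_iomap_fault(vma, vmf, &my_fs_iomap_ops);
	up_read(&MY_FS_I(inode)->dax_sem);

	return ret;
}

static const struct vm_operations_struct my_fs_dax_vm_ops = {
	.fault		= my_fs_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};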
1254#ifdef CONFIG_FS_DAX_PMD
1255/*
 1256 * The 'colour' (i.e. the low bits) within a PMD of a page offset.  This comes up
1257 * more often than one might expect in the below functions.
1258 */
1259#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
1260
1261static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
1262		struct vm_fault *vmf, unsigned long address,
1263		struct iomap *iomap, loff_t pos, bool write, void **entryp)
1264{
1265	struct address_space *mapping = vma->vm_file->f_mapping;
1266	struct block_device *bdev = iomap->bdev;
1267	struct blk_dax_ctl dax = {
1268		.sector = dax_iomap_sector(iomap, pos),
1269		.size = PMD_SIZE,
1270	};
1271	long length = dax_map_atomic(bdev, &dax);
1272	void *ret;
1273
1274	if (length < 0) /* dax_map_atomic() failed */
1275		return VM_FAULT_FALLBACK;
1276	if (length < PMD_SIZE)
1277		goto unmap_fallback;
1278	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
1279		goto unmap_fallback;
1280	if (!pfn_t_devmap(dax.pfn))
1281		goto unmap_fallback;
1282
1283	dax_unmap_atomic(bdev, &dax);
1284
1285	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
1286			RADIX_DAX_PMD);
1287	if (IS_ERR(ret))
1288		return VM_FAULT_FALLBACK;
1289	*entryp = ret;
1290
1291	return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
1292
1293 unmap_fallback:
1294	dax_unmap_atomic(bdev, &dax);
1295	return VM_FAULT_FALLBACK;
1296}
1297
1298static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
1299		struct vm_fault *vmf, unsigned long address,
1300		struct iomap *iomap, void **entryp)
1301{
1302	struct address_space *mapping = vma->vm_file->f_mapping;
1303	unsigned long pmd_addr = address & PMD_MASK;
1304	struct page *zero_page;
1305	spinlock_t *ptl;
1306	pmd_t pmd_entry;
1307	void *ret;
1308
1309	zero_page = mm_get_huge_zero_page(vma->vm_mm);
1310
1311	if (unlikely(!zero_page))
1312		return VM_FAULT_FALLBACK;
1313
1314	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1315			RADIX_DAX_PMD | RADIX_DAX_HZP);
1316	if (IS_ERR(ret))
1317		return VM_FAULT_FALLBACK;
1318	*entryp = ret;
1319
1320	ptl = pmd_lock(vma->vm_mm, pmd);
1321	if (!pmd_none(*pmd)) {
1322		spin_unlock(ptl);
1323		return VM_FAULT_FALLBACK;
1324	}
1325
1326	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
1327	pmd_entry = pmd_mkhuge(pmd_entry);
1328	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
1329	spin_unlock(ptl);
1330	return VM_FAULT_NOPAGE;
1331}
1332
1333int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1334		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
1335{
1336	struct address_space *mapping = vma->vm_file->f_mapping;
1337	unsigned long pmd_addr = address & PMD_MASK;
1338	bool write = flags & FAULT_FLAG_WRITE;
1339	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1340	struct inode *inode = mapping->host;
1341	int result = VM_FAULT_FALLBACK;
1342	struct iomap iomap = { 0 };
1343	pgoff_t max_pgoff, pgoff;
1344	struct vm_fault vmf;
1345	void *entry;
1346	loff_t pos;
1347	int error;
1348
1349	/* Fall back to PTEs if we're going to COW */
1350	if (write && !(vma->vm_flags & VM_SHARED))
1351		goto fallback;
1352
1353	/* If the PMD would extend outside the VMA */
1354	if (pmd_addr < vma->vm_start)
1355		goto fallback;
1356	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1357		goto fallback;
1358
1359	/*
1360	 * Check whether offset isn't beyond end of file now. Caller is
1361	 * supposed to hold locks serializing us with truncate / punch hole so
1362	 * this is a reliable test.
1363	 */
1364	pgoff = linear_page_index(vma, pmd_addr);
1365	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1366
1367	if (pgoff > max_pgoff)
1368		return VM_FAULT_SIGBUS;
1369
1370	/* If the PMD would extend beyond the file size */
1371	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1372		goto fallback;
1373
1374	/*
1375	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1376	 * setting up a mapping, so really we're using iomap_begin() as a way
1377	 * to look up our filesystem block.
1378	 */
1379	pos = (loff_t)pgoff << PAGE_SHIFT;
1380	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1381	if (error)
1382		goto fallback;
1383
1384	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1385		goto finish_iomap;
1386
1387	/*
1388	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1389	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
1390	 * the tree, for instance), it will return -EEXIST and we just fall
1391	 * back to 4k entries.
1392	 */
1393	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1394	if (IS_ERR(entry))
1395		goto finish_iomap;
1396
1397	vmf.pgoff = pgoff;
1398	vmf.flags = flags;
1399	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
1400
1401	switch (iomap.type) {
1402	case IOMAP_MAPPED:
1403		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
1404				&iomap, pos, write, &entry);
1405		break;
1406	case IOMAP_UNWRITTEN:
1407	case IOMAP_HOLE:
1408		if (WARN_ON_ONCE(write))
1409			goto unlock_entry;
1410		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
1411				&entry);
1412		break;
1413	default:
1414		WARN_ON_ONCE(1);
1415		break;
1416	}
1417
1418 unlock_entry:
1419	put_locked_mapping_entry(mapping, pgoff, entry);
1420 finish_iomap:
1421	if (ops->iomap_end) {
1422		int copied = PMD_SIZE;
1423
1424		if (result == VM_FAULT_FALLBACK)
1425			copied = 0;
1426		/*
1427		 * The fault is done by now and there's no way back (other
1428		 * thread may be already happily using PMD we have installed).
1429		 * Just ignore error from ->iomap_end since we cannot do much
1430		 * with it.
1431		 */
1432		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1433				&iomap);
1434	}
1435 fallback:
1436	if (result == VM_FAULT_FALLBACK) {
1437		split_huge_pmd(vma, pmd, address);
1438		count_vm_event(THP_FAULT_FALLBACK);
1439	}
1440	return result;
1441}
1442EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
1443#endif /* CONFIG_FS_DAX_PMD */
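/*
 * Editor's sketch: in this era a filesystem exposed the PMD path through
 * the vm_operations_struct ->pmd_fault hook (modeled on ext4;
 * my_fs_iomap_ops is an assumption):
 *
 *	static int my_fs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_iomap_pmd_fault(vma, addr, pmd, flags,
 *				&my_fs_iomap_ops);
 *	}
 */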
fs/dax.c (v4.17, excerpt)
   1/*
   2 * fs/dax.c - Direct Access filesystem code
   3 * Copyright (c) 2013-2014 Intel Corporation
   4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 */
  16
  17#include <linux/atomic.h>
  18#include <linux/blkdev.h>
  19#include <linux/buffer_head.h>
  20#include <linux/dax.h>
  21#include <linux/fs.h>
  22#include <linux/genhd.h>
  23#include <linux/highmem.h>
  24#include <linux/memcontrol.h>
  25#include <linux/mm.h>
  26#include <linux/mutex.h>
  27#include <linux/pagevec.h>
  28#include <linux/sched.h>
  29#include <linux/sched/signal.h>
  30#include <linux/uio.h>
  31#include <linux/vmstat.h>
  32#include <linux/pfn_t.h>
  33#include <linux/sizes.h>
  34#include <linux/mmu_notifier.h>
  35#include <linux/iomap.h>
  36#include "internal.h"
  37
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/fs_dax.h>
  40
  41/* We choose 4096 entries - same as per-zone page wait tables */
  42#define DAX_WAIT_TABLE_BITS 12
  43#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  44
   45/* The 'colour' (i.e. the low bits) within a PMD of a page offset.  */
  46#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  47#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  48
  49static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  50
  51static int __init init_dax_wait_table(void)
  52{
  53	int i;
  54
  55	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  56		init_waitqueue_head(wait_table + i);
  57	return 0;
  58}
  59fs_initcall(init_dax_wait_table);
  60
  61/*
   62 * We use the lowest available bit in an exceptional entry for locking, one bit for
  63 * the entry size (PMD) and two more to tell us if the entry is a zero page or
  64 * an empty entry that is just used for locking.  In total four special bits.
  65 *
  66 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  67 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  68 * block allocation.
  69 */
  70#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
  71#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
  72#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
  73#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
  74#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
  75
  76static unsigned long dax_radix_pfn(void *entry)
  77{
  78	return (unsigned long)entry >> RADIX_DAX_SHIFT;
  79}
  80
  81static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
  82{
  83	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
  84			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
  85}
  86
  87static unsigned int dax_radix_order(void *entry)
  88{
  89	if ((unsigned long)entry & RADIX_DAX_PMD)
  90		return PMD_SHIFT - PAGE_SHIFT;
  91	return 0;
  92}
  93
  94static int dax_is_pmd_entry(void *entry)
  95{
  96	return (unsigned long)entry & RADIX_DAX_PMD;
  97}
  98
  99static int dax_is_pte_entry(void *entry)
 100{
 101	return !((unsigned long)entry & RADIX_DAX_PMD);
 102}
 103
 104static int dax_is_zero_entry(void *entry)
 105{
 106	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
 107}
 108
 109static int dax_is_empty_entry(void *entry)
 110{
 111	return (unsigned long)entry & RADIX_DAX_EMPTY;
 112}
 113
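/*
 * Editor's illustration (not part of the original source): a locked PMD
 * entry for pfn 0x1000 round-trips through the helpers above as
 *
 *	void *e = dax_radix_locked_entry(0x1000, RADIX_DAX_PMD);
 *	dax_radix_pfn(e)   == 0x1000
 *	dax_radix_order(e) == PMD_SHIFT - PAGE_SHIFT
 *
 * The pfn lives above RADIX_DAX_SHIFT; the low bits carry the
 * exceptional-entry marker plus the lock, size and type flags.
 */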
 114/*
 115 * DAX radix tree locking
 116 */
 117struct exceptional_entry_key {
 118	struct address_space *mapping;
 119	pgoff_t entry_start;
 120};
 121
 122struct wait_exceptional_entry_queue {
 123	wait_queue_entry_t wait;
 124	struct exceptional_entry_key key;
 125};
 126
 127static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 128		pgoff_t index, void *entry, struct exceptional_entry_key *key)
 129{
 130	unsigned long hash;
 131
 132	/*
 133	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 134	 * queue to the start of that PMD.  This ensures that all offsets in
 135	 * the range covered by the PMD map to the same bit lock.
 136	 */
 137	if (dax_is_pmd_entry(entry))
 138		index &= ~PG_PMD_COLOUR;
 139
 140	key->mapping = mapping;
 141	key->entry_start = index;
 142
 143	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
 144	return wait_table + hash;
 145}
 146
 147static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
 148				       int sync, void *keyp)
 149{
 150	struct exceptional_entry_key *key = keyp;
 151	struct wait_exceptional_entry_queue *ewait =
 152		container_of(wait, struct wait_exceptional_entry_queue, wait);
 153
 154	if (key->mapping != ewait->key.mapping ||
 155	    key->entry_start != ewait->key.entry_start)
 156		return 0;
 157	return autoremove_wake_function(wait, mode, sync, NULL);
 158}
 159
 160/*
 161 * @entry may no longer be the entry at the index in the mapping.
 162 * The important information it's conveying is whether the entry at
 163 * this index used to be a PMD entry.
 164 */
 165static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 166		pgoff_t index, void *entry, bool wake_all)
 167{
 168	struct exceptional_entry_key key;
 169	wait_queue_head_t *wq;
 170
 171	wq = dax_entry_waitqueue(mapping, index, entry, &key);
 172
 173	/*
 174	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 175	 * under the i_pages lock, ditto for entry handling in our callers.
 176	 * So at this point all tasks that could have seen our entry locked
 177	 * must be in the waitqueue and the following check will see them.
 178	 */
 179	if (waitqueue_active(wq))
 180		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 181}
 182
 183/*
 184 * Check whether the given slot is locked.  Must be called with the i_pages
 185 * lock held.
 186 */
 187static inline int slot_locked(struct address_space *mapping, void **slot)
 188{
 189	unsigned long entry = (unsigned long)
 190		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 191	return entry & RADIX_DAX_ENTRY_LOCK;
 192}
 193
 194/*
 195 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 196 */
 197static inline void *lock_slot(struct address_space *mapping, void **slot)
 198{
 199	unsigned long entry = (unsigned long)
 200		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 201
 202	entry |= RADIX_DAX_ENTRY_LOCK;
 203	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 204	return (void *)entry;
 205}
 206
 207/*
 208 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 209 */
 210static inline void *unlock_slot(struct address_space *mapping, void **slot)
 211{
 212	unsigned long entry = (unsigned long)
 213		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 214
 215	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
 216	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 217	return (void *)entry;
 218}
 219
 220/*
  221 * Look up an entry in the radix tree; if it is an exceptional entry,
  222 * wait for it to become unlocked and return it. The caller must call
  223 * put_unlocked_mapping_entry() when it decides not to lock the entry,
  224 * or put_locked_mapping_entry() when it has locked the entry and later
  225 * wants to unlock it.
 226 *
 227 * Must be called with the i_pages lock held.
 228 */
 229static void *get_unlocked_mapping_entry(struct address_space *mapping,
 230					pgoff_t index, void ***slotp)
 231{
 232	void *entry, **slot;
 233	struct wait_exceptional_entry_queue ewait;
 234	wait_queue_head_t *wq;
 235
 236	init_wait(&ewait.wait);
 237	ewait.wait.func = wake_exceptional_entry_func;
 238
 239	for (;;) {
 240		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 241					  &slot);
 242		if (!entry ||
 243		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
 244		    !slot_locked(mapping, slot)) {
 245			if (slotp)
 246				*slotp = slot;
 247			return entry;
 248		}
 249
 250		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 251		prepare_to_wait_exclusive(wq, &ewait.wait,
 252					  TASK_UNINTERRUPTIBLE);
 253		xa_unlock_irq(&mapping->i_pages);
 254		schedule();
 255		finish_wait(wq, &ewait.wait);
 256		xa_lock_irq(&mapping->i_pages);
 257	}
 258}
 259
 260static void dax_unlock_mapping_entry(struct address_space *mapping,
 261				     pgoff_t index)
 262{
 263	void *entry, **slot;
 264
 265	xa_lock_irq(&mapping->i_pages);
 266	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
 267	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 268			 !slot_locked(mapping, slot))) {
 269		xa_unlock_irq(&mapping->i_pages);
 270		return;
 271	}
 272	unlock_slot(mapping, slot);
 273	xa_unlock_irq(&mapping->i_pages);
 274	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 275}
 276
 277static void put_locked_mapping_entry(struct address_space *mapping,
 278		pgoff_t index)
 279{
 280	dax_unlock_mapping_entry(mapping, index);
 281}
 282
 283/*
 284 * Called when we are done with a radix tree entry we looked up via
 285 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 286 */
 287static void put_unlocked_mapping_entry(struct address_space *mapping,
 288				       pgoff_t index, void *entry)
 289{
 290	if (!entry)
 291		return;
 292
 293	/* We have to wake up the next waiter for the radix tree entry lock */
 294	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 295}
 296
 297static unsigned long dax_entry_size(void *entry)
 298{
 299	if (dax_is_zero_entry(entry))
 300		return 0;
 301	else if (dax_is_empty_entry(entry))
 302		return 0;
 303	else if (dax_is_pmd_entry(entry))
 304		return PMD_SIZE;
 305	else
 306		return PAGE_SIZE;
 307}
 308
 309static unsigned long dax_radix_end_pfn(void *entry)
 310{
 311	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 312}
 313
 314/*
 315 * Iterate through all mapped pfns represented by an entry, i.e. skip
 316 * 'empty' and 'zero' entries.
 317 */
 318#define for_each_mapped_pfn(entry, pfn) \
 319	for (pfn = dax_radix_pfn(entry); \
 320			pfn < dax_radix_end_pfn(entry); pfn++)
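
/*
 * Worked example (illustrative, not part of the original file): on x86-64 a
 * PMD entry with dax_radix_pfn(entry) == 0x1000 covers PMD_SIZE == 2MiB, so
 * dax_radix_end_pfn() returns 0x1000 + 512 and the loop visits pfns
 * 0x1000..0x11ff.  Zero and empty entries have dax_entry_size() == 0, so
 * the loop body never runs for them.
 */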
 321
 322static void dax_associate_entry(void *entry, struct address_space *mapping)
 323{
 324	unsigned long pfn;
 325
 326	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 327		return;
 328
 329	for_each_mapped_pfn(entry, pfn) {
 330		struct page *page = pfn_to_page(pfn);
 331
 332		WARN_ON_ONCE(page->mapping);
 333		page->mapping = mapping;
 334	}
 335}
 336
 337static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 338		bool trunc)
 339{
 340	unsigned long pfn;
 341
 342	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 343		return;
 344
 345	for_each_mapped_pfn(entry, pfn) {
 346		struct page *page = pfn_to_page(pfn);
 347
 348		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 349		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 350		page->mapping = NULL;
 351	}
 352}
 353
 354/*
 355 * Find the radix tree entry at the given index. If it points to an
 356 * exceptional entry, return it with the radix tree entry locked. If the
 357 * radix tree doesn't contain the given index, create an empty exceptional
 358 * entry for the index and return with it locked.
 359 *
 360 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 361 * either return that locked entry or will return an error.  This error will
 362 * happen if there are any 4k entries within the 2MiB range that we are
 363 * requesting.
 364 *
 365 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 366 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 367 * insertion will fail if it finds any 4k entries already in the tree, and a
 368 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 369 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 370 * well as 2MiB empty entries.
 371 *
 372 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 373 * real storage backing them.  We will leave these real 2MiB DAX entries in
 374 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 375 *
 376 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 377 * persistent memory the benefit is doubtful. We can add that later if we can
 378 * show it helps.
 379 */
 380static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 381		unsigned long size_flag)
 382{
 383	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
 384	void *entry, **slot;
 385
 386restart:
 387	xa_lock_irq(&mapping->i_pages);
 388	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 389
 390	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
 391		entry = ERR_PTR(-EIO);
 392		goto out_unlock;
 393	}
 394
 395	if (entry) {
 396		if (size_flag & RADIX_DAX_PMD) {
 397			if (dax_is_pte_entry(entry)) {
 398				put_unlocked_mapping_entry(mapping, index,
 399						entry);
 400				entry = ERR_PTR(-EEXIST);
 401				goto out_unlock;
 402			}
 403		} else { /* trying to grab a PTE entry */
 404			if (dax_is_pmd_entry(entry) &&
 405			    (dax_is_zero_entry(entry) ||
 406			     dax_is_empty_entry(entry))) {
 407				pmd_downgrade = true;
 408			}
 409		}
 410	}
 411
 412	/* No entry for given index? Make sure radix tree is big enough. */
 413	if (!entry || pmd_downgrade) {
 414		int err;
 415
 416		if (pmd_downgrade) {
 417			/*
 418			 * Make sure 'entry' remains valid while we drop
 419			 * the i_pages lock.
 420			 */
 421			entry = lock_slot(mapping, slot);
 422		}
 423
 424		xa_unlock_irq(&mapping->i_pages);
 425		/*
 426		 * Besides huge zero pages, the only other things that get
 427		 * downgraded are empty entries, which don't need to be
 428		 * unmapped.
 429		 */
 430		if (pmd_downgrade && dax_is_zero_entry(entry))
 431			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 432							PG_PMD_NR, false);
 433
 434		err = radix_tree_preload(
 435				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
 436		if (err) {
 437			if (pmd_downgrade)
 438				put_locked_mapping_entry(mapping, index);
 439			return ERR_PTR(err);
 440		}
 441		xa_lock_irq(&mapping->i_pages);
 442
 443		if (!entry) {
 444			/*
 445			 * We needed to drop the i_pages lock while calling
 446			 * radix_tree_preload() and we didn't have an entry to
 447			 * lock.  See if another thread inserted an entry at
 448			 * our index during this time.
 449			 */
 450			entry = __radix_tree_lookup(&mapping->i_pages, index,
 451					NULL, &slot);
 452			if (entry) {
 453				radix_tree_preload_end();
 454				xa_unlock_irq(&mapping->i_pages);
 455				goto restart;
 456			}
 457		}
 458
 459		if (pmd_downgrade) {
 460			dax_disassociate_entry(entry, mapping, false);
 461			radix_tree_delete(&mapping->i_pages, index);
 462			mapping->nrexceptional--;
 463			dax_wake_mapping_entry_waiter(mapping, index, entry,
 464					true);
 465		}
 466
 467		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
 468
 469		err = __radix_tree_insert(&mapping->i_pages, index,
 470				dax_radix_order(entry), entry);
 471		radix_tree_preload_end();
 472		if (err) {
 473			xa_unlock_irq(&mapping->i_pages);
 474			/*
 475			 * Our insertion of a DAX entry failed, most likely
 476			 * because we were inserting a PMD entry and it
 477			 * collided with a PTE sized entry at a different
 478			 * index in the PMD range.  We haven't inserted
 479			 * anything into the radix tree and have no waiters to
 480			 * wake.
 481			 */
 482			return ERR_PTR(err);
 483		}
 484		/* Good, we have inserted empty locked entry into the tree. */
 485		mapping->nrexceptional++;
 486		xa_unlock_irq(&mapping->i_pages);
 487		return entry;
 488	}
 489	entry = lock_slot(mapping, slot);
 490 out_unlock:
 491	xa_unlock_irq(&mapping->i_pages);
 492	return entry;
 493}
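
/*
 * Illustrative sketch, not part of the original file: how a PMD fault
 * handler uses grab_mapping_entry() and falls back to PTEs, roughly as
 * dax_iomap_pmd_fault() does below:
 *
 *	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
 *	if (IS_ERR(entry))		(e.g. -EEXIST: 4k entries in the range)
 *		return VM_FAULT_FALLBACK;
 *	... install the 2MiB mapping ...
 *	put_locked_mapping_entry(mapping, pgoff);
 */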
 494
 495static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 496					  pgoff_t index, bool trunc)
 497{
 498	int ret = 0;
 499	void *entry;
 500	struct radix_tree_root *pages = &mapping->i_pages;
 501
 502	xa_lock_irq(pages);
 503	entry = get_unlocked_mapping_entry(mapping, index, NULL);
 504	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
 505		goto out;
 506	if (!trunc &&
 507	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
 508	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
 509		goto out;
 510	dax_disassociate_entry(entry, mapping, trunc);
 511	radix_tree_delete(pages, index);
 512	mapping->nrexceptional--;
 513	ret = 1;
 514out:
 515	put_unlocked_mapping_entry(mapping, index, entry);
 516	xa_unlock_irq(pages);
 517	return ret;
 518}
 519/*
 520 * Delete the exceptional DAX entry at @index from @mapping. Wait for the
 521 * radix tree entry to get unlocked before deleting it.
 522 */
 523int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 524{
 525	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 526
 527	/*
 528	 * This gets called from the truncate / punch-hole path. As such, the
 529	 * caller must hold locks protecting against concurrent modifications of
 530	 * the radix tree (usually the fs-private i_mmap_sem held for writing).
 531	 * Since the caller has seen an exceptional entry for this index, we had
 532	 * better find it at that index as well...
 533	 */
 534	WARN_ON_ONCE(!ret);
 535	return ret;
 536}
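
/*
 * Illustrative sketch, not part of the original file: the truncate path
 * (see mm/truncate.c) ends up here roughly like this, with the fs-private
 * locks mentioned above already held:
 *
 *	if (dax_mapping(mapping))
 *		dax_delete_mapping_entry(mapping, index);
 */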
 537
 538/*
 539 * Invalidate exceptional DAX entry if it is clean.
 540 */
 541int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 542				      pgoff_t index)
 543{
 544	return __dax_invalidate_mapping_entry(mapping, index, false);
 545}
 546
 547static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 548		sector_t sector, size_t size, struct page *to,
 549		unsigned long vaddr)
 550{
 551	void *vto, *kaddr;
 552	pgoff_t pgoff;
 553	pfn_t pfn;
 554	long rc;
 555	int id;
 556
 557	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
 558	if (rc)
 559		return rc;
 560
 561	id = dax_read_lock();
 562	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
 563	if (rc < 0) {
 564		dax_read_unlock(id);
 565		return rc;
 566	}
 567	vto = kmap_atomic(to);
 568	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
 569	kunmap_atomic(vto);
 570	dax_read_unlock(id);
 571	return 0;
 572}
 573
 574/*
 575 * By this point grab_mapping_entry() has ensured that we have a locked entry
 576 * of the appropriate size so we don't have to worry about downgrading PMDs to
 577 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 578 * already in the tree, we will skip the insertion and just dirty the PMD as
 579 * appropriate.
 580 */
 581static void *dax_insert_mapping_entry(struct address_space *mapping,
 582				      struct vm_fault *vmf,
 583				      void *entry, pfn_t pfn_t,
 584				      unsigned long flags, bool dirty)
 585{
 586	struct radix_tree_root *pages = &mapping->i_pages;
 587	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 588	pgoff_t index = vmf->pgoff;
 589	void *new_entry;
 590
 591	if (dirty)
 592		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 593
 594	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 595		/* we are replacing a zero page with a block mapping */
 596		if (dax_is_pmd_entry(entry))
 597			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 598							PG_PMD_NR, false);
 599		else /* pte entry */
 600			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 601	}
 602
 603	xa_lock_irq(pages);
 604	new_entry = dax_radix_locked_entry(pfn, flags);
 605	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 606		dax_disassociate_entry(entry, mapping, false);
 607		dax_associate_entry(new_entry, mapping);
 608	}
 609
 610	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 611		/*
 612		 * Only swap our new entry into the radix tree if the current
 613		 * entry is a zero page or an empty entry.  If a normal PTE or
 614		 * PMD entry is already in the tree, we leave it alone.  This
 615		 * means that if we are trying to insert a PTE and the
 616		 * existing entry is a PMD, we will just leave the PMD in the
 617		 * tree and dirty it if necessary.
 618		 */
 619		struct radix_tree_node *node;
 620		void **slot;
 621		void *ret;
 622
 623		ret = __radix_tree_lookup(pages, index, &node, &slot);
 624		WARN_ON_ONCE(ret != entry);
 625		__radix_tree_replace(pages, node, slot,
 626				     new_entry, NULL);
 627		entry = new_entry;
 628	}
 629
 630	if (dirty)
 631		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
 632
 633	xa_unlock_irq(pages);
 634	return entry;
 635}
 636
 637static inline unsigned long
 638pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 639{
 640	unsigned long address;
 641
 642	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 643	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 644	return address;
 645}
 646
 647/* Walk all mappings of a given index of a file and writeprotect them */
 648static void dax_mapping_entry_mkclean(struct address_space *mapping,
 649				      pgoff_t index, unsigned long pfn)
 650{
 651	struct vm_area_struct *vma;
 652	pte_t pte, *ptep = NULL;
 653	pmd_t *pmdp = NULL;
 654	spinlock_t *ptl;
 655
 656	i_mmap_lock_read(mapping);
 657	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 658		unsigned long address, start, end;
 659
 660		cond_resched();
 661
 662		if (!(vma->vm_flags & VM_SHARED))
 663			continue;
 664
 665		address = pgoff_address(index, vma);
 666
 667		/*
 668		 * Note that because we provide start/end to follow_pte_pmd it
 669		 * will call mmu_notifier_invalidate_range_start() on our behalf
 670		 * before taking any lock.
 671		 */
 672		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
 673			continue;
 674
 675		/*
 676		 * No need to call mmu_notifier_invalidate_range() as we are
 677		 * downgrading page table protection, not changing it to point
 678		 * to a new page.
 679		 *
 680		 * See Documentation/vm/mmu_notifier.txt
 681		 */
 682		if (pmdp) {
 683#ifdef CONFIG_FS_DAX_PMD
 684			pmd_t pmd;
 685
 686			if (pfn != pmd_pfn(*pmdp))
 687				goto unlock_pmd;
 688			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
 689				goto unlock_pmd;
 690
 691			flush_cache_page(vma, address, pfn);
 692			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
 693			pmd = pmd_wrprotect(pmd);
 694			pmd = pmd_mkclean(pmd);
 695			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 696unlock_pmd:
 697#endif
 698			spin_unlock(ptl);
 699		} else {
 700			if (pfn != pte_pfn(*ptep))
 701				goto unlock_pte;
 702			if (!pte_dirty(*ptep) && !pte_write(*ptep))
 703				goto unlock_pte;
 704
 705			flush_cache_page(vma, address, pfn);
 706			pte = ptep_clear_flush(vma, address, ptep);
 707			pte = pte_wrprotect(pte);
 708			pte = pte_mkclean(pte);
 709			set_pte_at(vma->vm_mm, address, ptep, pte);
 710unlock_pte:
 711			pte_unmap_unlock(ptep, ptl);
 712		}
 713
 714		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 715	}
 716	i_mmap_unlock_read(mapping);
 717}
 718
 719static int dax_writeback_one(struct dax_device *dax_dev,
 720		struct address_space *mapping, pgoff_t index, void *entry)
 721{
 722	struct radix_tree_root *pages = &mapping->i_pages;
 723	void *entry2, **slot;
 724	unsigned long pfn;
 725	long ret = 0;
 726	size_t size;
 727
 728	/*
 729	 * A page got tagged dirty in a DAX mapping? Something is seriously
 730	 * wrong.
 731	 */
 732	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
 733		return -EIO;
 734
 735	xa_lock_irq(pages);
 736	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
 737	/* Entry got punched out / reallocated? */
 738	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
 739		goto put_unlocked;
 740	/*
 741	 * Entry got reallocated elsewhere? No need to write it back. We have to
 742	 * compare pfns as we must not bail out due to difference in lockbit
 743	 * or entry type.
 744	 */
 745	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
 746		goto put_unlocked;
 747	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 748				dax_is_zero_entry(entry))) {
 749		ret = -EIO;
 750		goto put_unlocked;
 751	}
 752
 753	/* Another fsync thread may have already written back this entry */
 754	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
 755		goto put_unlocked;
 756	/* Lock the entry to serialize with page faults */
 757	entry = lock_slot(mapping, slot);
 758	/*
 759	 * We can clear the tag now but we have to be careful so that concurrent
 760	 * dax_writeback_one() calls for the same index cannot finish before we
 761	 * actually flush the caches. This is achieved as the calls will look
 762	 * at the entry only under the i_pages lock and once they do that
 763	 * they will see the entry locked and wait for it to unlock.
 764	 */
 765	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
 766	xa_unlock_irq(pages);
 767
 768	/*
 769	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
 770	 * in the middle of a PMD, the 'index' we are given will be aligned to
 771	 * the start index of the PMD, as will the pfn we pull from 'entry'.
 772	 * This allows us to flush for PMD_SIZE and not have to worry about
 773	 * partial PMD writebacks.
 774	 */
 775	pfn = dax_radix_pfn(entry);
 776	size = PAGE_SIZE << dax_radix_order(entry);
 777
 778	dax_mapping_entry_mkclean(mapping, index, pfn);
 779	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
 780	/*
 781	 * After we have flushed the cache, we can clear the dirty tag. There
 782	 * cannot be new dirty data in the pfn after the flush has completed as
 783	 * the pfn mappings are writeprotected and fault waits for mapping
 784	 * entry lock.
 785	 */
 786	xa_lock_irq(pages);
 787	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
 788	xa_unlock_irq(pages);
 789	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 790	put_locked_mapping_entry(mapping, index);
 791	return ret;
 792
 793 put_unlocked:
 794	put_unlocked_mapping_entry(mapping, index, entry2);
 795	xa_unlock_irq(pages);
 796	return ret;
 797}
 798
 799/*
 800 * Flush the mapping to the persistent domain within the byte range of [start,
 801 * end]. This is required by data integrity operations to ensure file data is
 802 * on persistent storage prior to completion of the operation.
 803 */
 804int dax_writeback_mapping_range(struct address_space *mapping,
 805		struct block_device *bdev, struct writeback_control *wbc)
 806{
 807	struct inode *inode = mapping->host;
 808	pgoff_t start_index, end_index;
 809	pgoff_t indices[PAGEVEC_SIZE];
 810	struct dax_device *dax_dev;
 811	struct pagevec pvec;
 812	bool done = false;
 813	int i, ret = 0;
 814
 815	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 816		return -EIO;
 817
 818	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 819		return 0;
 820
 821	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 822	if (!dax_dev)
 823		return -EIO;
 824
 825	start_index = wbc->range_start >> PAGE_SHIFT;
 826	end_index = wbc->range_end >> PAGE_SHIFT;
 827
 828	trace_dax_writeback_range(inode, start_index, end_index);
 829
 830	tag_pages_for_writeback(mapping, start_index, end_index);
 831
 832	pagevec_init(&pvec);
 833	while (!done) {
 834		pvec.nr = find_get_entries_tag(mapping, start_index,
 835				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
 836				pvec.pages, indices);
 837
 838		if (pvec.nr == 0)
 839			break;
 840
 841		for (i = 0; i < pvec.nr; i++) {
 842			if (indices[i] > end_index) {
 843				done = true;
 844				break;
 845			}
 846
 847			ret = dax_writeback_one(dax_dev, mapping, indices[i],
 848					pvec.pages[i]);
 849			if (ret < 0) {
 850				mapping_set_error(mapping, ret);
 851				goto out;
 852			}
 853		}
 854		start_index = indices[pvec.nr - 1] + 1;
 855	}
 856out:
 857	put_dax(dax_dev);
 858	trace_dax_writeback_range_done(inode, start_index, end_index);
 859	return (ret < 0 ? ret : 0);
 860}
 861EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
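
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * typically wires this up from its ->writepages() for DAX inodes
 * ("my_dax_writepages" is hypothetical):
 *
 *	static int my_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */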
 862
 863static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 864{
 865	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 866}
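
/*
 * Worked example (illustrative, not part of the original file): with
 * iomap->offset == 0, iomap->addr == 1048576 (the extent's disk address in
 * bytes) and pos == 8192, this returns (1048576 + 8192) >> 9 == 2064, the
 * 512-byte sector backing that page.
 */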
 867
 868static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 869			 pfn_t *pfnp)
 870{
 871	const sector_t sector = dax_iomap_sector(iomap, pos);
 872	pgoff_t pgoff;
 873	void *kaddr;
 874	int id, rc;
 875	long length;
 876
 877	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 878	if (rc)
 879		return rc;
 880	id = dax_read_lock();
 881	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 882				   &kaddr, pfnp);
 883	if (length < 0) {
 884		rc = length;
 885		goto out;
 886	}
 887	rc = -EINVAL;
 888	if (PFN_PHYS(length) < size)
 889		goto out;
 890	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
 891		goto out;
 892	/* For larger pages we need devmap */
 893	if (length > 1 && !pfn_t_devmap(*pfnp))
 894		goto out;
 895	rc = 0;
 896out:
 897	dax_read_unlock(id);
 898	return rc;
 899}
 900
 901/*
 902 * The user has performed a load from a hole in the file.  Allocating a new
 903 * page in the file would cause excessive storage usage for workloads with
 904 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 905 * If this page is ever written to we will re-fault and change the mapping to
 906 * point to real DAX storage instead.
 907 */
 908static int dax_load_hole(struct address_space *mapping, void *entry,
 909			 struct vm_fault *vmf)
 910{
 911	struct inode *inode = mapping->host;
 912	unsigned long vaddr = vmf->address;
 913	int ret = VM_FAULT_NOPAGE;
 914	struct page *zero_page;
 915	void *entry2;
 916	pfn_t pfn;
 917
 918	zero_page = ZERO_PAGE(0);
 919	if (unlikely(!zero_page)) {
 920		ret = VM_FAULT_OOM;
 921		goto out;
 922	}
 923
 924	pfn = page_to_pfn_t(zero_page);
 925	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 926			RADIX_DAX_ZERO_PAGE, false);
 927	if (IS_ERR(entry2)) {
 928		ret = VM_FAULT_SIGBUS;
 929		goto out;
 930	}
 931
 932	vm_insert_mixed(vmf->vma, vaddr, pfn);
 933out:
 934	trace_dax_load_hole(inode, vmf, ret);
 935	return ret;
 936}
 937
 938static bool dax_range_is_aligned(struct block_device *bdev,
 939				 unsigned int offset, unsigned int length)
 940{
 941	unsigned short sector_size = bdev_logical_block_size(bdev);
 942
 943	if (!IS_ALIGNED(offset, sector_size))
 944		return false;
 945	if (!IS_ALIGNED(length, sector_size))
 946		return false;
 947
 948	return true;
 949}
 950
 951int __dax_zero_page_range(struct block_device *bdev,
 952		struct dax_device *dax_dev, sector_t sector,
 953		unsigned int offset, unsigned int size)
 954{
 955	if (dax_range_is_aligned(bdev, offset, size)) {
 956		sector_t start_sector = sector + (offset >> 9);
 957
 958		return blkdev_issue_zeroout(bdev, start_sector,
 959				size >> 9, GFP_NOFS, 0);
 960	} else {
 961		pgoff_t pgoff;
 962		long rc, id;
 963		void *kaddr;
 964		pfn_t pfn;
 965
 966		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 967		if (rc)
 968			return rc;
 969
 970		id = dax_read_lock();
 971		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
 972				&pfn);
 973		if (rc < 0) {
 974			dax_read_unlock(id);
 975			return rc;
 976		}
 977		memset(kaddr + offset, 0, size);
 978		dax_flush(dax_dev, kaddr + offset, size);
 979		dax_read_unlock(id);
 980	}
 981	return 0;
 982}
 983EXPORT_SYMBOL_GPL(__dax_zero_page_range);
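
/*
 * Illustrative sketch, not part of the original file: an iomap-based
 * zeroing helper might use this to zero a sub-page range, e.g. the tail of
 * a block at a truncate boundary ("my_dax_zero" is hypothetical):
 *
 *	static int my_dax_zero(struct iomap *iomap, loff_t pos, unsigned bytes)
 *	{
 *		return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
 *				dax_iomap_sector(iomap, pos),
 *				pos & (PAGE_SIZE - 1), bytes);
 *	}
 */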
 984
 985static loff_t
 986dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 987		struct iomap *iomap)
 988{
 989	struct block_device *bdev = iomap->bdev;
 990	struct dax_device *dax_dev = iomap->dax_dev;
 991	struct iov_iter *iter = data;
 992	loff_t end = pos + length, done = 0;
 993	ssize_t ret = 0;
 994	int id;
 995
 996	if (iov_iter_rw(iter) == READ) {
 997		end = min(end, i_size_read(inode));
 998		if (pos >= end)
 999			return 0;
1000
1001		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1002			return iov_iter_zero(min(length, end - pos), iter);
1003	}
1004
1005	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1006		return -EIO;
1007
1008	/*
1009	 * A write can allocate blocks for an area which has a hole page mapped
1010	 * into the page tables. We have to tear down these mappings so that
1011	 * data written by write(2) is visible in mmap.
1012	 */
1013	if (iomap->flags & IOMAP_F_NEW) {
1014		invalidate_inode_pages2_range(inode->i_mapping,
1015					      pos >> PAGE_SHIFT,
1016					      (end - 1) >> PAGE_SHIFT);
1017	}
1018
1019	id = dax_read_lock();
1020	while (pos < end) {
1021		unsigned offset = pos & (PAGE_SIZE - 1);
1022		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1023		const sector_t sector = dax_iomap_sector(iomap, pos);
1024		ssize_t map_len;
1025		pgoff_t pgoff;
1026		void *kaddr;
1027		pfn_t pfn;
1028
1029		if (fatal_signal_pending(current)) {
1030			ret = -EINTR;
1031			break;
1032		}
1033
1034		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1035		if (ret)
1036			break;
1037
1038		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1039				&kaddr, &pfn);
1040		if (map_len < 0) {
1041			ret = map_len;
1042			break;
1043		}
1044
1045		map_len = PFN_PHYS(map_len);
1046		kaddr += offset;
1047		map_len -= offset;
1048		if (map_len > end - pos)
1049			map_len = end - pos;
1050
1051		/*
1052		 * The userspace address for the memory copy has already been
1053		 * validated via access_ok() in either vfs_read() or
1054		 * vfs_write(), depending on which operation we are doing.
1055		 */
1056		if (iov_iter_rw(iter) == WRITE)
1057			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1058					map_len, iter);
1059		else
1060			map_len = copy_to_iter(kaddr, map_len, iter);
1061		if (map_len <= 0) {
1062			ret = map_len ? map_len : -EFAULT;
1063			break;
1064		}
1065
1066		pos += map_len;
1067		length -= map_len;
1068		done += map_len;
1069	}
1070	dax_read_unlock(id);
1071
1072	return done ? done : ret;
1073}
1074
1075/**
1076 * dax_iomap_rw - Perform I/O to a DAX file
1077 * @iocb:	The control block for this I/O
1078 * @iter:	The addresses to do I/O from or to
1079 * @ops:	iomap ops passed from the file system
1080 *
1081 * This function performs read and write operations to directly mapped
1082 * persistent memory.  The caller needs to take care of read/write exclusion
1083 * and evicting any page cache pages in the region under I/O.
1084 */
1085ssize_t
1086dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1087		const struct iomap_ops *ops)
1088{
1089	struct address_space *mapping = iocb->ki_filp->f_mapping;
1090	struct inode *inode = mapping->host;
1091	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1092	unsigned flags = 0;
1093
1094	if (iov_iter_rw(iter) == WRITE) {
1095		lockdep_assert_held_exclusive(&inode->i_rwsem);
1096		flags |= IOMAP_WRITE;
1097	} else {
1098		lockdep_assert_held(&inode->i_rwsem);
1099	}
1100
1101	while (iov_iter_count(iter)) {
1102		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1103				iter, dax_iomap_actor);
1104		if (ret <= 0)
1105			break;
1106		pos += ret;
1107		done += ret;
1108	}
1109
1110	iocb->ki_pos += done;
1111	return done ? done : ret;
1112}
1113EXPORT_SYMBOL_GPL(dax_iomap_rw);
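
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->read_iter() for a DAX inode, providing the read/write exclusion the
 * comment above asks for ("my_iomap_ops" is hypothetical):
 *
 *	static ssize_t my_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &my_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */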
1114
1115static int dax_fault_return(int error)
1116{
1117	if (error == 0)
1118		return VM_FAULT_NOPAGE;
1119	if (error == -ENOMEM)
1120		return VM_FAULT_OOM;
1121	return VM_FAULT_SIGBUS;
1122}
1123
1124/*
1125 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1126 * flushed on write-faults (non-cow), but not read-faults.
1127 */
1128static bool dax_fault_is_synchronous(unsigned long flags,
1129		struct vm_area_struct *vma, struct iomap *iomap)
1130{
1131	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1132		&& (iomap->flags & IOMAP_F_DIRTY);
1133}
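
/*
 * Illustrative sketch, not part of the original file: userspace opts into
 * synchronous faults with MAP_SYNC, which must be paired with
 * MAP_SHARED_VALIDATE:
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * A write fault through such a mapping while the inode has dirty metadata
 * (IOMAP_F_DIRTY) then takes the VM_FAULT_NEEDDSYNC paths below.
 */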
1134
1135static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1136			       int *iomap_errp, const struct iomap_ops *ops)
1137{
1138	struct vm_area_struct *vma = vmf->vma;
1139	struct address_space *mapping = vma->vm_file->f_mapping;
1140	struct inode *inode = mapping->host;
1141	unsigned long vaddr = vmf->address;
1142	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1143	struct iomap iomap = { 0 };
1144	unsigned flags = IOMAP_FAULT;
1145	int error, major = 0;
1146	bool write = vmf->flags & FAULT_FLAG_WRITE;
1147	bool sync;
1148	int vmf_ret = 0;
1149	void *entry;
1150	pfn_t pfn;
1151
1152	trace_dax_pte_fault(inode, vmf, vmf_ret);
1153	/*
1154	 * Check whether offset isn't beyond end of file now. Caller is supposed
1155	 * to hold locks serializing us with truncate / punch hole so this is
1156	 * a reliable test.
1157	 */
1158	if (pos >= i_size_read(inode)) {
1159		vmf_ret = VM_FAULT_SIGBUS;
1160		goto out;
1161	}
1162
1163	if (write && !vmf->cow_page)
1164		flags |= IOMAP_WRITE;
1165
1166	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1167	if (IS_ERR(entry)) {
1168		vmf_ret = dax_fault_return(PTR_ERR(entry));
1169		goto out;
1170	}
1171
1172	/*
1173	 * It is possible, particularly with mixed reads & writes to private
1174	 * mappings, that we have raced with a PMD fault that overlaps with
1175	 * the PTE we need to set up.  If so just return and the fault will be
1176	 * retried.
1177	 */
1178	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1179		vmf_ret = VM_FAULT_NOPAGE;
1180		goto unlock_entry;
1181	}
1182
1183	/*
1184	 * Note that we don't bother to use iomap_apply here: DAX requires
1185	 * the filesystem block size to be equal to the page size, which means
1186	 * that we never have to deal with more than a single extent here.
1187	 */
1188	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1189	if (iomap_errp)
1190		*iomap_errp = error;
1191	if (error) {
1192		vmf_ret = dax_fault_return(error);
1193		goto unlock_entry;
1194	}
1195	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1196		error = -EIO;	/* fs corruption? */
1197		goto error_finish_iomap;
1198	}
1199
1200	if (vmf->cow_page) {
1201		sector_t sector = dax_iomap_sector(&iomap, pos);
1202
1203		switch (iomap.type) {
1204		case IOMAP_HOLE:
1205		case IOMAP_UNWRITTEN:
1206			clear_user_highpage(vmf->cow_page, vaddr);
1207			break;
1208		case IOMAP_MAPPED:
1209			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1210					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1211			break;
1212		default:
1213			WARN_ON_ONCE(1);
1214			error = -EIO;
1215			break;
1216		}
1217
1218		if (error)
1219			goto error_finish_iomap;
1220
1221		__SetPageUptodate(vmf->cow_page);
1222		vmf_ret = finish_fault(vmf);
1223		if (!vmf_ret)
1224			vmf_ret = VM_FAULT_DONE_COW;
1225		goto finish_iomap;
1226	}
1227
1228	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1229
1230	switch (iomap.type) {
1231	case IOMAP_MAPPED:
1232		if (iomap.flags & IOMAP_F_NEW) {
1233			count_vm_event(PGMAJFAULT);
1234			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1235			major = VM_FAULT_MAJOR;
1236		}
1237		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1238		if (error < 0)
1239			goto error_finish_iomap;
1240
1241		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1242						 0, write && !sync);
1243		if (IS_ERR(entry)) {
1244			error = PTR_ERR(entry);
1245			goto error_finish_iomap;
1246		}
1247
1248		/*
1249		 * If we are doing synchronous page fault and inode needs fsync,
1250		 * we can insert PTE into page tables only after that happens.
1251		 * Skip insertion for now and return the pfn so that caller can
1252		 * insert it after fsync is done.
1253		 */
1254		if (sync) {
1255			if (WARN_ON_ONCE(!pfnp)) {
1256				error = -EIO;
1257				goto error_finish_iomap;
1258			}
1259			*pfnp = pfn;
1260			vmf_ret = VM_FAULT_NEEDDSYNC | major;
1261			goto finish_iomap;
1262		}
1263		trace_dax_insert_mapping(inode, vmf, entry);
1264		if (write)
1265			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
1266		else
1267			error = vm_insert_mixed(vma, vaddr, pfn);
1268
1269		/* -EBUSY is fine, somebody else faulted on the same PTE */
1270		if (error == -EBUSY)
1271			error = 0;
1272		break;
1273	case IOMAP_UNWRITTEN:
1274	case IOMAP_HOLE:
1275		if (!write) {
1276			vmf_ret = dax_load_hole(mapping, entry, vmf);
1277			goto finish_iomap;
1278		}
1279		/*FALLTHRU*/
1280	default:
1281		WARN_ON_ONCE(1);
1282		error = -EIO;
1283		break;
1284	}
1285
1286 error_finish_iomap:
1287	vmf_ret = dax_fault_return(error) | major;
1288 finish_iomap:
1289	if (ops->iomap_end) {
1290		int copied = PAGE_SIZE;
1291
1292		if (vmf_ret & VM_FAULT_ERROR)
1293			copied = 0;
1294		/*
1295		 * The fault is done by now and there's no way back (other
1296		 * thread may be already happily using PTE we have installed).
1297		 * Just ignore error from ->iomap_end since we cannot do much
1298		 * with it.
1299		 */
1300		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1301	}
1302 unlock_entry:
1303	put_locked_mapping_entry(mapping, vmf->pgoff);
1304 out:
1305	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1306	return vmf_ret;
1307}
1308
1309#ifdef CONFIG_FS_DAX_PMD
1310static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1311		void *entry)
1312{
1313	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1314	unsigned long pmd_addr = vmf->address & PMD_MASK;
1315	struct inode *inode = mapping->host;
1316	struct page *zero_page;
1317	void *ret = NULL;
1318	spinlock_t *ptl;
1319	pmd_t pmd_entry;
1320	pfn_t pfn;
1321
1322	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1323
1324	if (unlikely(!zero_page))
1325		goto fallback;
1326
1327	pfn = page_to_pfn_t(zero_page);
1328	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1329			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1330	if (IS_ERR(ret))
1331		goto fallback;
1332
1333	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1334	if (!pmd_none(*(vmf->pmd))) {
1335		spin_unlock(ptl);
1336		goto fallback;
1337	}
1338
1339	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1340	pmd_entry = pmd_mkhuge(pmd_entry);
1341	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1342	spin_unlock(ptl);
1343	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1344	return VM_FAULT_NOPAGE;
1345
1346fallback:
1347	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1348	return VM_FAULT_FALLBACK;
1349}
1350
1351static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1352			       const struct iomap_ops *ops)
1353{
1354	struct vm_area_struct *vma = vmf->vma;
1355	struct address_space *mapping = vma->vm_file->f_mapping;
1356	unsigned long pmd_addr = vmf->address & PMD_MASK;
1357	bool write = vmf->flags & FAULT_FLAG_WRITE;
1358	bool sync;
1359	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1360	struct inode *inode = mapping->host;
1361	int result = VM_FAULT_FALLBACK;
1362	struct iomap iomap = { 0 };
1363	pgoff_t max_pgoff, pgoff;
1364	void *entry;
1365	loff_t pos;
1366	int error;
1367	pfn_t pfn;
1368
1369	/*
1370	 * Check whether offset isn't beyond end of file now. Caller is
1371	 * supposed to hold locks serializing us with truncate / punch hole so
1372	 * this is a reliable test.
1373	 */
1374	pgoff = linear_page_index(vma, pmd_addr);
1375	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1376
1377	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1378
1379	/*
1380	 * Make sure that the faulting address's PMD offset (color) matches
1381	 * the PMD offset from the start of the file.  This is necessary so
1382	 * that a PMD range in the page table overlaps exactly with a PMD
1383	 * range in the radix tree.
1384	 */
1385	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1386	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1387		goto fallback;
1388
1389	/* Fall back to PTEs if we're going to COW */
1390	if (write && !(vma->vm_flags & VM_SHARED))
1391		goto fallback;
1392
1393	/* If the PMD would extend outside the VMA */
1394	if (pmd_addr < vma->vm_start)
1395		goto fallback;
1396	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1397		goto fallback;
1398
1399	if (pgoff >= max_pgoff) {
1400		result = VM_FAULT_SIGBUS;
1401		goto out;
1402	}
1403
1404	/* If the PMD would extend beyond the file size */
1405	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1406		goto fallback;
1407
1408	/*
1409	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1410	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1411	 * is already in the tree, for instance), it will return -EEXIST and
1412	 * we just fall back to 4k entries.
1413	 */
1414	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1415	if (IS_ERR(entry))
1416		goto fallback;
1417
1418	/*
1419	 * It is possible, particularly with mixed reads & writes to private
1420	 * mappings, that we have raced with a PTE fault that overlaps with
1421	 * the PMD we need to set up.  If so just return and the fault will be
1422	 * retried.
1423	 */
1424	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1425			!pmd_devmap(*vmf->pmd)) {
1426		result = 0;
1427		goto unlock_entry;
1428	}
1429
1430	/*
1431	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1432	 * setting up a mapping, so really we're using iomap_begin() as a way
1433	 * to look up our filesystem block.
1434	 */
1435	pos = (loff_t)pgoff << PAGE_SHIFT;
1436	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1437	if (error)
1438		goto unlock_entry;
1439
1440	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1441		goto finish_iomap;
1442
1443	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1444
1445	switch (iomap.type) {
1446	case IOMAP_MAPPED:
1447		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1448		if (error < 0)
1449			goto finish_iomap;
1450
1451		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1452						RADIX_DAX_PMD, write && !sync);
1453		if (IS_ERR(entry))
1454			goto finish_iomap;
1455
1456		/*
1457		 * If we are doing synchronous page fault and inode needs fsync,
1458		 * we can insert PMD into page tables only after that happens.
1459		 * Skip insertion for now and return the pfn so that caller can
1460		 * insert it after fsync is done.
1461		 */
1462		if (sync) {
1463			if (WARN_ON_ONCE(!pfnp))
1464				goto finish_iomap;
1465			*pfnp = pfn;
1466			result = VM_FAULT_NEEDDSYNC;
1467			goto finish_iomap;
1468		}
1469
1470		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1471		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1472					    write);
1473		break;
1474	case IOMAP_UNWRITTEN:
1475	case IOMAP_HOLE:
1476		if (WARN_ON_ONCE(write))
1477			break;
1478		result = dax_pmd_load_hole(vmf, &iomap, entry);
1479		break;
1480	default:
1481		WARN_ON_ONCE(1);
1482		break;
1483	}
1484
1485 finish_iomap:
1486	if (ops->iomap_end) {
1487		int copied = PMD_SIZE;
1488
1489		if (result == VM_FAULT_FALLBACK)
1490			copied = 0;
1491		/*
1492		 * The fault is done by now and there's no way back (other
1493		 * thread may be already happily using PMD we have installed).
1494		 * Just ignore error from ->iomap_end since we cannot do much
1495		 * with it.
1496		 */
1497		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1498				&iomap);
1499	}
1500 unlock_entry:
1501	put_locked_mapping_entry(mapping, pgoff);
1502 fallback:
1503	if (result == VM_FAULT_FALLBACK) {
1504		split_huge_pmd(vma, vmf->pmd, vmf->address);
1505		count_vm_event(THP_FAULT_FALLBACK);
1506	}
1507out:
1508	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1509	return result;
1510}
1511#else
1512static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1513			       const struct iomap_ops *ops)
1514{
1515	return VM_FAULT_FALLBACK;
1516}
1517#endif /* CONFIG_FS_DAX_PMD */
1518
1519/**
1520 * dax_iomap_fault - handle a page fault on a DAX file
1521 * @vmf: The description of the fault
1522 * @pe_size: Size of the page to fault in
1523 * @pfnp: PFN to insert for synchronous faults if fsync is required
1524 * @iomap_errp: Storage for detailed error code in case of error
1525 * @ops: Iomap ops passed from the file system
1526 *
1527 * When a page fault occurs, filesystems may call this helper in
1528 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1529 * has done all the necessary locking for the page fault to proceed
1530 * successfully.
1531 */
1532int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1533		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1534{
1535	switch (pe_size) {
1536	case PE_SIZE_PTE:
1537		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1538	case PE_SIZE_PMD:
1539		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1540	default:
1541		return VM_FAULT_FALLBACK;
1542	}
1543}
1544EXPORT_SYMBOL_GPL(dax_iomap_fault);
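
/*
 * Illustrative sketch, not part of the original file: a minimal ->fault /
 * ->huge_fault handler for a filesystem that does not support MAP_SYNC, so
 * it can pass NULL for @pfnp ("my_rwsem" and "my_iomap_ops" are
 * hypothetical):
 *
 *	static int my_dax_huge_fault(struct vm_fault *vmf,
 *				     enum page_entry_size pe_size)
 *	{
 *		int result;
 *
 *		down_read(&my_rwsem);
 *		result = dax_iomap_fault(vmf, pe_size, NULL, NULL, &my_iomap_ops);
 *		up_read(&my_rwsem);
 *		return result;
 *	}
 */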
1545
1546/**
1547 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1548 * @vmf: The description of the fault
1549 * @pe_size: Size of entry to be inserted
1550 * @pfn: PFN to insert
1551 *
1552 * This function inserts a writeable PTE or PMD entry into the page tables
1553 * for an mmapped DAX file.  It takes care of marking the corresponding
1554 * radix tree entry as dirty as well.
1555 */
1556static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1557				  enum page_entry_size pe_size,
1558				  pfn_t pfn)
1559{
1560	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1561	void *entry, **slot;
1562	pgoff_t index = vmf->pgoff;
1563	int vmf_ret, error;
1564
1565	xa_lock_irq(&mapping->i_pages);
1566	entry = get_unlocked_mapping_entry(mapping, index, &slot);
1567	/* Did we race with someone splitting the entry? */
1568	if (!entry ||
1569	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1570	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1571		put_unlocked_mapping_entry(mapping, index, entry);
1572		xa_unlock_irq(&mapping->i_pages);
1573		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1574						      VM_FAULT_NOPAGE);
1575		return VM_FAULT_NOPAGE;
1576	}
1577	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1578	entry = lock_slot(mapping, slot);
1579	xa_unlock_irq(&mapping->i_pages);
1580	switch (pe_size) {
1581	case PE_SIZE_PTE:
1582		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1583		vmf_ret = dax_fault_return(error);
1584		break;
1585#ifdef CONFIG_FS_DAX_PMD
1586	case PE_SIZE_PMD:
1587		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1588			pfn, true);
1589		break;
1590#endif
1591	default:
1592		vmf_ret = VM_FAULT_FALLBACK;
1593	}
1594	put_locked_mapping_entry(mapping, index);
1595	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
1596	return vmf_ret;
1597}
1598
1599/**
1600 * dax_finish_sync_fault - finish synchronous page fault
1601 * @vmf: The description of the fault
1602 * @pe_size: Size of entry to be inserted
1603 * @pfn: PFN to insert
1604 *
1605 * This function ensures that the file range touched by the page fault is
1606 * stored persistently on the media and handles insertion of the
1607 * appropriate page table entry.
1608 */
1609int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1610			  pfn_t pfn)
1611{
1612	int err;
1613	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1614	size_t len = 0;
1615
1616	if (pe_size == PE_SIZE_PTE)
1617		len = PAGE_SIZE;
1618	else if (pe_size == PE_SIZE_PMD)
1619		len = PMD_SIZE;
1620	else
1621		WARN_ON_ONCE(1);
1622	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1623	if (err)
1624		return VM_FAULT_SIGBUS;
1625	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1626}
1627EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
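
/*
 * Illustrative sketch, not part of the original file: a MAP_SYNC-capable
 * fault handler completes the handshake started by VM_FAULT_NEEDDSYNC
 * ("my_iomap_ops" is hypothetical):
 *
 *	pfn_t pfn;
 *	int result;
 *
 *	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);
 *	if (result & VM_FAULT_NEEDDSYNC)
 *		result = dax_finish_sync_fault(vmf, pe_size, pfn);
 *	return result;
 */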