fs/dax.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dax.c - Direct Access filesystem code
   4 * Copyright (c) 2013-2014 Intel Corporation
   5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   7 */
   8
   9#include <linux/atomic.h>
  10#include <linux/blkdev.h>
  11#include <linux/buffer_head.h>
  12#include <linux/dax.h>
  13#include <linux/fs.h>
  14#include <linux/highmem.h>
  15#include <linux/memcontrol.h>
  16#include <linux/mm.h>
  17#include <linux/mutex.h>
  18#include <linux/pagevec.h>
  19#include <linux/sched.h>
  20#include <linux/sched/signal.h>
  21#include <linux/uio.h>
  22#include <linux/vmstat.h>
  23#include <linux/pfn_t.h>
  24#include <linux/sizes.h>
  25#include <linux/mmu_notifier.h>
  26#include <linux/iomap.h>
  27#include <linux/rmap.h>
  28#include <asm/pgalloc.h>
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/fs_dax.h>
  32
  33static inline unsigned int pe_order(enum page_entry_size pe_size)
  34{
  35	if (pe_size == PE_SIZE_PTE)
  36		return PAGE_SHIFT - PAGE_SHIFT;
  37	if (pe_size == PE_SIZE_PMD)
  38		return PMD_SHIFT - PAGE_SHIFT;
  39	if (pe_size == PE_SIZE_PUD)
  40		return PUD_SHIFT - PAGE_SHIFT;
  41	return ~0;
  42}
  43
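/*
 * Editorial note (not in the original source): on x86-64 with 4K pages,
 * pe_order(PE_SIZE_PTE) == 0, pe_order(PE_SIZE_PMD) == 9 (2M / 4K) and
 * pe_order(PE_SIZE_PUD) == 18 (1G / 4K).
 */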
  44/* We choose 4096 entries - same as per-zone page wait tables */
  45#define DAX_WAIT_TABLE_BITS 12
  46#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  47
  48/* The 'colour' (ie low bits) within a PMD of a page offset.  */
  49#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  50#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  51
  52/* The order of a PMD entry */
  53#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
  54
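/*
 * Worked example (editorial, same x86-64/4K assumption): PMD_SIZE is 2M, so
 * PG_PMD_NR == 512, PG_PMD_COLOUR == 511 and PMD_ORDER == 9; the low nine
 * bits of a page offset are its colour within a PMD.
 */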
  55static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  56
  57static int __init init_dax_wait_table(void)
  58{
  59	int i;
  60
  61	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  62		init_waitqueue_head(wait_table + i);
  63	return 0;
  64}
  65fs_initcall(init_dax_wait_table);
  66
  67/*
  68 * DAX pagecache entries use XArray value entries so they can't be mistaken
  69 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
  70 * and two more to tell us if the entry is a zero page or an empty entry that
  71 * is just used for locking.  In total four special bits.
  72 *
  73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  75 * block allocation.
  76 */
  77#define DAX_SHIFT	(4)
  78#define DAX_LOCKED	(1UL << 0)
  79#define DAX_PMD		(1UL << 1)
  80#define DAX_ZERO_PAGE	(1UL << 2)
  81#define DAX_EMPTY	(1UL << 3)
  82
  83static unsigned long dax_to_pfn(void *entry)
  84{
  85	return xa_to_value(entry) >> DAX_SHIFT;
  86}
  87
  88static void *dax_make_entry(pfn_t pfn, unsigned long flags)
  89{
  90	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
  91}
  92
  93static bool dax_is_locked(void *entry)
  94{
  95	return xa_to_value(entry) & DAX_LOCKED;
  96}
  97
  98static unsigned int dax_entry_order(void *entry)
  99{
 100	if (xa_to_value(entry) & DAX_PMD)
 101		return PMD_ORDER;
 102	return 0;
 103}
 104
 105static unsigned long dax_is_pmd_entry(void *entry)
 106{
 107	return xa_to_value(entry) & DAX_PMD;
 108}
 109
 110static bool dax_is_pte_entry(void *entry)
 111{
 112	return !(xa_to_value(entry) & DAX_PMD);
 113}
 114
 115static int dax_is_zero_entry(void *entry)
 116{
 117	return xa_to_value(entry) & DAX_ZERO_PAGE;
 118}
 119
 120static int dax_is_empty_entry(void *entry)
 121{
 122	return xa_to_value(entry) & DAX_EMPTY;
 123}
 124
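/*
 * Illustrative round-trip (editorial sketch, not part of the original
 * source): the pfn is packed above the four flag bits, so
 *
 *	void *entry = dax_make_entry(pfn_to_pfn_t(0x1234), DAX_PMD);
 *
 * gives dax_to_pfn(entry) == 0x1234, dax_is_pmd_entry(entry) != 0, and
 * dax_is_locked(entry) == false until dax_lock_entry() sets DAX_LOCKED.
 */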
 125/*
 126 * true if the entry that was found is of a smaller order than the entry
 127 * we were looking for
 128 */
 129static bool dax_is_conflict(void *entry)
 130{
 131	return entry == XA_RETRY_ENTRY;
 132}
 133
 134/*
 135 * DAX page cache entry locking
 136 */
 137struct exceptional_entry_key {
 138	struct xarray *xa;
 139	pgoff_t entry_start;
 140};
 141
 142struct wait_exceptional_entry_queue {
 143	wait_queue_entry_t wait;
 144	struct exceptional_entry_key key;
 145};
 146
 147/**
  148 * enum dax_wake_mode - waitqueue wakeup behaviour
 149 * @WAKE_ALL: wake all waiters in the waitqueue
 150 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 151 */
 152enum dax_wake_mode {
 153	WAKE_ALL,
 154	WAKE_NEXT,
 155};
 156
 157static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
 158		void *entry, struct exceptional_entry_key *key)
 159{
 160	unsigned long hash;
 161	unsigned long index = xas->xa_index;
 162
 163	/*
 164	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 165	 * queue to the start of that PMD.  This ensures that all offsets in
 166	 * the range covered by the PMD map to the same bit lock.
 167	 */
 168	if (dax_is_pmd_entry(entry))
 169		index &= ~PG_PMD_COLOUR;
 170	key->xa = xas->xa;
 171	key->entry_start = index;
 172
 173	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
 174	return wait_table + hash;
 175}
 176
 177static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
 178		unsigned int mode, int sync, void *keyp)
 179{
 180	struct exceptional_entry_key *key = keyp;
 181	struct wait_exceptional_entry_queue *ewait =
 182		container_of(wait, struct wait_exceptional_entry_queue, wait);
 183
 184	if (key->xa != ewait->key.xa ||
 185	    key->entry_start != ewait->key.entry_start)
 186		return 0;
 187	return autoremove_wake_function(wait, mode, sync, NULL);
 188}
 189
 190/*
 191 * @entry may no longer be the entry at the index in the mapping.
 192 * The important information it's conveying is whether the entry at
 193 * this index used to be a PMD entry.
 194 */
 195static void dax_wake_entry(struct xa_state *xas, void *entry,
 196			   enum dax_wake_mode mode)
 197{
 198	struct exceptional_entry_key key;
 199	wait_queue_head_t *wq;
 200
 201	wq = dax_entry_waitqueue(xas, entry, &key);
 202
 203	/*
 204	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 205	 * under the i_pages lock, ditto for entry handling in our callers.
 206	 * So at this point all tasks that could have seen our entry locked
 207	 * must be in the waitqueue and the following check will see them.
 208	 */
 209	if (waitqueue_active(wq))
 210		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
 211}
 212
 213/*
 214 * Look up entry in page cache, wait for it to become unlocked if it
 215 * is a DAX entry and return it.  The caller must subsequently call
 216 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 217 * if it did.  The entry returned may have a larger order than @order.
 218 * If @order is larger than the order of the entry found in i_pages, this
 219 * function returns a dax_is_conflict entry.
 220 *
 221 * Must be called with the i_pages lock held.
 222 */
 223static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 224{
 225	void *entry;
 226	struct wait_exceptional_entry_queue ewait;
 227	wait_queue_head_t *wq;
 228
 229	init_wait(&ewait.wait);
 230	ewait.wait.func = wake_exceptional_entry_func;
 231
 232	for (;;) {
 233		entry = xas_find_conflict(xas);
 234		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 235			return entry;
 236		if (dax_entry_order(entry) < order)
 237			return XA_RETRY_ENTRY;
 238		if (!dax_is_locked(entry))
 239			return entry;
 240
 241		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 242		prepare_to_wait_exclusive(wq, &ewait.wait,
 243					  TASK_UNINTERRUPTIBLE);
 244		xas_unlock_irq(xas);
 245		xas_reset(xas);
 246		schedule();
 247		finish_wait(wq, &ewait.wait);
 248		xas_lock_irq(xas);
 249	}
 250}
 251
 252/*
 253 * The only thing keeping the address space around is the i_pages lock
 254 * (it's cycled in clear_inode() after removing the entries from i_pages)
 255 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 256 */
 257static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 258{
 259	struct wait_exceptional_entry_queue ewait;
 260	wait_queue_head_t *wq;
 261
 262	init_wait(&ewait.wait);
 263	ewait.wait.func = wake_exceptional_entry_func;
 264
 265	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 266	/*
 267	 * Unlike get_unlocked_entry() there is no guarantee that this
 268	 * path ever successfully retrieves an unlocked entry before an
 269	 * inode dies. Perform a non-exclusive wait in case this path
 270	 * never successfully performs its own wake up.
 271	 */
 272	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 273	xas_unlock_irq(xas);
 274	schedule();
 275	finish_wait(wq, &ewait.wait);
 276}
 277
 278static void put_unlocked_entry(struct xa_state *xas, void *entry,
 279			       enum dax_wake_mode mode)
 280{
 281	if (entry && !dax_is_conflict(entry))
 282		dax_wake_entry(xas, entry, mode);
 283}
 284
 285/*
 286 * We used the xa_state to get the entry, but then we locked the entry and
 287 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 288 * before use.
 289 */
 290static void dax_unlock_entry(struct xa_state *xas, void *entry)
 291{
 292	void *old;
 293
 294	BUG_ON(dax_is_locked(entry));
 295	xas_reset(xas);
 296	xas_lock_irq(xas);
 297	old = xas_store(xas, entry);
 298	xas_unlock_irq(xas);
 299	BUG_ON(!dax_is_locked(old));
 300	dax_wake_entry(xas, entry, WAKE_NEXT);
 301}
 302
 303/*
 304 * Return: The entry stored at this location before it was locked.
 305 */
 306static void *dax_lock_entry(struct xa_state *xas, void *entry)
 307{
 308	unsigned long v = xa_to_value(entry);
 309	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
 310}
 311
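/*
 * Editorial sketch of the lock/unlock protocol used by the callers below,
 * assuming the xa_state already points at the index of interest:
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	dax_lock_entry(&xas, entry);
 *	xas_unlock_irq(&xas);
 *	...work on the entry while faults on this index wait...
 *	dax_unlock_entry(&xas, entry);
 */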
 312static unsigned long dax_entry_size(void *entry)
 313{
 314	if (dax_is_zero_entry(entry))
 315		return 0;
 316	else if (dax_is_empty_entry(entry))
 317		return 0;
 318	else if (dax_is_pmd_entry(entry))
 319		return PMD_SIZE;
 320	else
 321		return PAGE_SIZE;
 322}
 323
 324static unsigned long dax_end_pfn(void *entry)
 325{
 326	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 327}
 328
 329/*
 330 * Iterate through all mapped pfns represented by an entry, i.e. skip
 331 * 'empty' and 'zero' entries.
 332 */
 333#define for_each_mapped_pfn(entry, pfn) \
 334	for (pfn = dax_to_pfn(entry); \
 335			pfn < dax_end_pfn(entry); pfn++)
 336
 337static inline bool dax_page_is_shared(struct page *page)
 338{
 339	return page->mapping == PAGE_MAPPING_DAX_SHARED;
 340}
 341
 342/*
 343 * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
 344 * refcount.
 345 */
 346static inline void dax_page_share_get(struct page *page)
 347{
 348	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
 349		/*
 350		 * Reset the index if the page was already mapped
 351		 * regularly before.
 352		 */
 353		if (page->mapping)
 354			page->share = 1;
 355		page->mapping = PAGE_MAPPING_DAX_SHARED;
 356	}
 357	page->share++;
 358}
 359
 360static inline unsigned long dax_page_share_put(struct page *page)
 361{
 362	return --page->share;
 363}
 364
 365/*
  366 * When called from dax_insert_entry(), the shared flag indicates whether
  367 * this entry is shared by multiple files.  If so, set page->mapping to
  368 * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
 369 */
 370static void dax_associate_entry(void *entry, struct address_space *mapping,
 371		struct vm_area_struct *vma, unsigned long address, bool shared)
 372{
 373	unsigned long size = dax_entry_size(entry), pfn, index;
 374	int i = 0;
 375
 376	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 377		return;
 378
 379	index = linear_page_index(vma, address & ~(size - 1));
 380	for_each_mapped_pfn(entry, pfn) {
 381		struct page *page = pfn_to_page(pfn);
 382
 383		if (shared) {
 384			dax_page_share_get(page);
 385		} else {
 386			WARN_ON_ONCE(page->mapping);
 387			page->mapping = mapping;
 388			page->index = index + i++;
 389		}
 390	}
 391}
 392
 393static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 394		bool trunc)
 395{
 396	unsigned long pfn;
 397
 398	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 399		return;
 400
 401	for_each_mapped_pfn(entry, pfn) {
 402		struct page *page = pfn_to_page(pfn);
 403
 404		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 405		if (dax_page_is_shared(page)) {
 406			/* keep the shared flag if this page is still shared */
 407			if (dax_page_share_put(page) > 0)
 408				continue;
 409		} else
 410			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 411		page->mapping = NULL;
 412		page->index = 0;
 413	}
 414}
 415
 416static struct page *dax_busy_page(void *entry)
 417{
 418	unsigned long pfn;
 419
 420	for_each_mapped_pfn(entry, pfn) {
 421		struct page *page = pfn_to_page(pfn);
 422
 423		if (page_ref_count(page) > 1)
 424			return page;
 425	}
 426	return NULL;
 427}
 428
 429/*
 430 * dax_lock_page - Lock the DAX entry corresponding to a page
 431 * @page: The page whose entry we want to lock
 432 *
 433 * Context: Process context.
 434 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 435 * not be locked.
 436 */
 437dax_entry_t dax_lock_page(struct page *page)
 438{
 439	XA_STATE(xas, NULL, 0);
 440	void *entry;
 441
 442	/* Ensure page->mapping isn't freed while we look at it */
 443	rcu_read_lock();
 444	for (;;) {
 445		struct address_space *mapping = READ_ONCE(page->mapping);
 446
 447		entry = NULL;
 448		if (!mapping || !dax_mapping(mapping))
 449			break;
 450
 451		/*
 452		 * In the device-dax case there's no need to lock, a
 453		 * struct dev_pagemap pin is sufficient to keep the
 454		 * inode alive, and we assume we have dev_pagemap pin
 455		 * otherwise we would not have a valid pfn_to_page()
 456		 * translation.
 457		 */
 458		entry = (void *)~0UL;
 459		if (S_ISCHR(mapping->host->i_mode))
 460			break;
 461
 462		xas.xa = &mapping->i_pages;
 463		xas_lock_irq(&xas);
 464		if (mapping != page->mapping) {
 465			xas_unlock_irq(&xas);
 466			continue;
 467		}
 468		xas_set(&xas, page->index);
 469		entry = xas_load(&xas);
 470		if (dax_is_locked(entry)) {
 471			rcu_read_unlock();
 472			wait_entry_unlocked(&xas, entry);
 473			rcu_read_lock();
 474			continue;
 475		}
 476		dax_lock_entry(&xas, entry);
 477		xas_unlock_irq(&xas);
 478		break;
 479	}
 480	rcu_read_unlock();
 481	return (dax_entry_t)entry;
 482}
 483
 484void dax_unlock_page(struct page *page, dax_entry_t cookie)
 485{
 486	struct address_space *mapping = page->mapping;
 487	XA_STATE(xas, &mapping->i_pages, page->index);
 488
 489	if (S_ISCHR(mapping->host->i_mode))
 490		return;
 491
 492	dax_unlock_entry(&xas, (void *)cookie);
 493}
 494
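/*
 * Editorial usage sketch for the pair above (this mirrors the memory
 * failure path, which pins the entry while it walks the page's mappings):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (cookie) {
 *		...page->mapping and page->index are stable here...
 *		dax_unlock_page(page, cookie);
 *	}
 */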
 495/*
 496 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 497 * @mapping: the file's mapping whose entry we want to lock
 498 * @index: the offset within this file
 499 * @page: output the dax page corresponding to this dax entry
 500 *
 501 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 502 * could not be locked.
 503 */
 504dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
 505		struct page **page)
 506{
 507	XA_STATE(xas, NULL, 0);
 508	void *entry;
 509
 510	rcu_read_lock();
 511	for (;;) {
 512		entry = NULL;
 513		if (!dax_mapping(mapping))
 514			break;
 515
 516		xas.xa = &mapping->i_pages;
 517		xas_lock_irq(&xas);
 518		xas_set(&xas, index);
 519		entry = xas_load(&xas);
 520		if (dax_is_locked(entry)) {
 521			rcu_read_unlock();
 522			wait_entry_unlocked(&xas, entry);
 523			rcu_read_lock();
 524			continue;
 525		}
 526		if (!entry ||
 527		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 528			/*
  529			 * Because we are looking up the entry via the file's
  530			 * mapping and index, the entry may not have been
  531			 * inserted yet, or it may be a zero/empty entry.
  532			 * We don't consider this an error case, so return a
  533			 * special value and do not output @page.
 534			 */
 535			entry = (void *)~0UL;
 536		} else {
 537			*page = pfn_to_page(dax_to_pfn(entry));
 538			dax_lock_entry(&xas, entry);
 539		}
 540		xas_unlock_irq(&xas);
 541		break;
 542	}
 543	rcu_read_unlock();
 544	return (dax_entry_t)entry;
 545}
 546
 547void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
 548		dax_entry_t cookie)
 549{
 550	XA_STATE(xas, &mapping->i_pages, index);
 551
 552	if (cookie == ~0UL)
 553		return;
 554
 555	dax_unlock_entry(&xas, (void *)cookie);
 556}
 557
 558/*
 559 * Find page cache entry at given index. If it is a DAX entry, return it
 560 * with the entry locked. If the page cache doesn't contain an entry at
 561 * that index, add a locked empty entry.
 562 *
 563 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 564 * either return that locked entry or will return VM_FAULT_FALLBACK.
 565 * This will happen if there are any PTE entries within the PMD range
 566 * that we are requesting.
 567 *
 568 * We always favor PTE entries over PMD entries. There isn't a flow where we
 569 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 570 * insertion will fail if it finds any PTE entries already in the tree, and a
 571 * PTE insertion will cause an existing PMD entry to be unmapped and
 572 * downgraded to PTE entries.  This happens for both PMD zero pages as
 573 * well as PMD empty entries.
 574 *
 575 * The exception to this downgrade path is for PMD entries that have
 576 * real storage backing them.  We will leave these real PMD entries in
 577 * the tree, and PTE writes will simply dirty the entire PMD entry.
 578 *
 579 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 580 * persistent memory the benefit is doubtful. We can add that later if we can
 581 * show it helps.
 582 *
 583 * On error, this function does not return an ERR_PTR.  Instead it returns
 584 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 585 * overlap with xarray value entries.
 586 */
 587static void *grab_mapping_entry(struct xa_state *xas,
 588		struct address_space *mapping, unsigned int order)
 589{
 590	unsigned long index = xas->xa_index;
 591	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
 592	void *entry;
 593
 594retry:
 595	pmd_downgrade = false;
 596	xas_lock_irq(xas);
 597	entry = get_unlocked_entry(xas, order);
 598
 599	if (entry) {
 600		if (dax_is_conflict(entry))
 601			goto fallback;
 602		if (!xa_is_value(entry)) {
 603			xas_set_err(xas, -EIO);
 604			goto out_unlock;
 605		}
 606
 607		if (order == 0) {
 608			if (dax_is_pmd_entry(entry) &&
 609			    (dax_is_zero_entry(entry) ||
 610			     dax_is_empty_entry(entry))) {
 611				pmd_downgrade = true;
 612			}
 613		}
 614	}
 615
 616	if (pmd_downgrade) {
 617		/*
 618		 * Make sure 'entry' remains valid while we drop
 619		 * the i_pages lock.
 620		 */
 621		dax_lock_entry(xas, entry);
 622
 623		/*
 624		 * Besides huge zero pages the only other thing that gets
 625		 * downgraded are empty entries which don't need to be
 626		 * unmapped.
 627		 */
 628		if (dax_is_zero_entry(entry)) {
 629			xas_unlock_irq(xas);
 630			unmap_mapping_pages(mapping,
 631					xas->xa_index & ~PG_PMD_COLOUR,
 632					PG_PMD_NR, false);
 633			xas_reset(xas);
 634			xas_lock_irq(xas);
 635		}
 636
 637		dax_disassociate_entry(entry, mapping, false);
 638		xas_store(xas, NULL);	/* undo the PMD join */
 639		dax_wake_entry(xas, entry, WAKE_ALL);
 640		mapping->nrpages -= PG_PMD_NR;
 641		entry = NULL;
 642		xas_set(xas, index);
 643	}
 644
 645	if (entry) {
 646		dax_lock_entry(xas, entry);
 647	} else {
 648		unsigned long flags = DAX_EMPTY;
 649
 650		if (order > 0)
 651			flags |= DAX_PMD;
 652		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 653		dax_lock_entry(xas, entry);
 654		if (xas_error(xas))
 655			goto out_unlock;
 656		mapping->nrpages += 1UL << order;
 657	}
 658
 659out_unlock:
 660	xas_unlock_irq(xas);
 661	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 662		goto retry;
 663	if (xas->xa_node == XA_ERROR(-ENOMEM))
 664		return xa_mk_internal(VM_FAULT_OOM);
 665	if (xas_error(xas))
 666		return xa_mk_internal(VM_FAULT_SIGBUS);
 667	return entry;
 668fallback:
 669	xas_unlock_irq(xas);
 670	return xa_mk_internal(VM_FAULT_FALLBACK);
 671}
 672
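/*
 * Editorial sketch of how the fault handlers below decode the result:
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	...a VM_FAULT_* code...
 *	...otherwise entry is locked and must be released with
 *	dax_unlock_entry()...
 */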
 673/**
 674 * dax_layout_busy_page_range - find first pinned page in @mapping
 675 * @mapping: address space to scan for a page with ref count > 1
 676 * @start: Starting offset. Page containing 'start' is included.
 677 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
  678 *       pages from 'start' to the end of the file are included.
 679 *
 680 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 681 * 'onlined' to the page allocator so they are considered idle when
 682 * page->count == 1. A filesystem uses this interface to determine if
 683 * any page in the mapping is busy, i.e. for DMA, or other
 684 * get_user_pages() usages.
 685 *
 686 * It is expected that the filesystem is holding locks to block the
 687 * establishment of new mappings in this address_space. I.e. it expects
 688 * to be able to run unmap_mapping_range() and subsequently not race
 689 * mapping_mapped() becoming true.
 690 */
 691struct page *dax_layout_busy_page_range(struct address_space *mapping,
 692					loff_t start, loff_t end)
 693{
 694	void *entry;
 695	unsigned int scanned = 0;
 696	struct page *page = NULL;
 697	pgoff_t start_idx = start >> PAGE_SHIFT;
 698	pgoff_t end_idx;
 699	XA_STATE(xas, &mapping->i_pages, start_idx);
 700
 701	/*
 702	 * In the 'limited' case get_user_pages() for dax is disabled.
 703	 */
 704	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 705		return NULL;
 706
 707	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 708		return NULL;
 709
  710	/* If end == LLONG_MAX, include all pages from start to the end of file */
 711	if (end == LLONG_MAX)
 712		end_idx = ULONG_MAX;
 713	else
 714		end_idx = end >> PAGE_SHIFT;
 715	/*
 716	 * If we race get_user_pages_fast() here either we'll see the
 717	 * elevated page count in the iteration and wait, or
 718	 * get_user_pages_fast() will see that the page it took a reference
 719	 * against is no longer mapped in the page tables and bail to the
 720	 * get_user_pages() slow path.  The slow path is protected by
 721	 * pte_lock() and pmd_lock(). New references are not taken without
 722	 * holding those locks, and unmap_mapping_pages() will not zero the
 723	 * pte or pmd without holding the respective lock, so we are
 724	 * guaranteed to either see new references or prevent new
 725	 * references from being established.
 726	 */
 727	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
 728
 729	xas_lock_irq(&xas);
 730	xas_for_each(&xas, entry, end_idx) {
 731		if (WARN_ON_ONCE(!xa_is_value(entry)))
 732			continue;
 733		if (unlikely(dax_is_locked(entry)))
 734			entry = get_unlocked_entry(&xas, 0);
 735		if (entry)
 736			page = dax_busy_page(entry);
 737		put_unlocked_entry(&xas, entry, WAKE_NEXT);
 738		if (page)
 739			break;
 740		if (++scanned % XA_CHECK_SCHED)
 741			continue;
 742
 743		xas_pause(&xas);
 744		xas_unlock_irq(&xas);
 745		cond_resched();
 746		xas_lock_irq(&xas);
 747	}
 748	xas_unlock_irq(&xas);
 749	return page;
 750}
 751EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
 752
 753struct page *dax_layout_busy_page(struct address_space *mapping)
 754{
 755	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
 756}
 757EXPORT_SYMBOL_GPL(dax_layout_busy_page);
 758
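/*
 * Editorial usage sketch (modeled on the filesystem break-layouts loops
 * that call this before truncating a DAX file):
 *
 *	struct page *page = dax_layout_busy_page(mapping);
 *	if (page)
 *		...wait for the page's refcount to drop back to 1, then
 *		rescan...
 */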
 759static int __dax_invalidate_entry(struct address_space *mapping,
 760					  pgoff_t index, bool trunc)
 761{
 762	XA_STATE(xas, &mapping->i_pages, index);
 763	int ret = 0;
 764	void *entry;
 765
 766	xas_lock_irq(&xas);
 767	entry = get_unlocked_entry(&xas, 0);
 768	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 769		goto out;
 770	if (!trunc &&
 771	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 772	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 773		goto out;
 774	dax_disassociate_entry(entry, mapping, trunc);
 775	xas_store(&xas, NULL);
 776	mapping->nrpages -= 1UL << dax_entry_order(entry);
 777	ret = 1;
 778out:
 779	put_unlocked_entry(&xas, entry, WAKE_ALL);
 780	xas_unlock_irq(&xas);
 781	return ret;
 782}
 783
 784/*
 785 * Delete DAX entry at @index from @mapping.  Wait for it
 786 * to be unlocked before deleting it.
 787 */
 788int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 789{
 790	int ret = __dax_invalidate_entry(mapping, index, true);
 791
 792	/*
 793	 * This gets called from truncate / punch_hole path. As such, the caller
 794	 * must hold locks protecting against concurrent modifications of the
 795	 * page cache (usually fs-private i_mmap_sem for writing). Since the
 796	 * caller has seen a DAX entry for this index, we better find it
 797	 * at that index as well...
 798	 */
 799	WARN_ON_ONCE(!ret);
 800	return ret;
 801}
 802
 803/*
 804 * Invalidate DAX entry if it is clean.
 805 */
 806int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 807				      pgoff_t index)
 808{
 809	return __dax_invalidate_entry(mapping, index, false);
 810}
 811
 812static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
 813{
 814	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
 815}
 816
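/*
 * Worked example (editorial): with iomap->addr == 0x10000000 (where the
 * extent starts on the dax device), iomap->offset == 0x200000 (the file
 * offset of the extent) and pos == 0x201234, this computes
 * PHYS_PFN(0x10000000 + 0x201000 - 0x200000) == PHYS_PFN(0x10001000).
 */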
 817static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
 818{
 819	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
 820	void *vto, *kaddr;
 821	long rc;
 822	int id;
 823
 824	id = dax_read_lock();
 825	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
 826				&kaddr, NULL);
 827	if (rc < 0) {
 828		dax_read_unlock(id);
 829		return rc;
 830	}
 831	vto = kmap_atomic(vmf->cow_page);
 832	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
 833	kunmap_atomic(vto);
 834	dax_read_unlock(id);
 835	return 0;
 836}
 837
 838/*
 839 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 840 * flushed on write-faults (non-cow), but not read-faults.
 841 */
 842static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
 843		struct vm_area_struct *vma)
 844{
 845	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
 846		(iter->iomap.flags & IOMAP_F_DIRTY);
 847}
 848
 849/*
 850 * By this point grab_mapping_entry() has ensured that we have a locked entry
 851 * of the appropriate size so we don't have to worry about downgrading PMDs to
 852 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 853 * already in the tree, we will skip the insertion and just dirty the PMD as
 854 * appropriate.
 855 */
 856static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 857		const struct iomap_iter *iter, void *entry, pfn_t pfn,
 858		unsigned long flags)
 859{
 860	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 861	void *new_entry = dax_make_entry(pfn, flags);
 862	bool write = iter->flags & IOMAP_WRITE;
 863	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
 864	bool shared = iter->iomap.flags & IOMAP_F_SHARED;
 865
 866	if (dirty)
 867		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 868
 869	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
 870		unsigned long index = xas->xa_index;
 871		/* we are replacing a zero page with block mapping */
 872		if (dax_is_pmd_entry(entry))
 873			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 874					PG_PMD_NR, false);
 875		else /* pte entry */
 876			unmap_mapping_pages(mapping, index, 1, false);
 877	}
 878
 879	xas_reset(xas);
 880	xas_lock_irq(xas);
 881	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 882		void *old;
 883
 884		dax_disassociate_entry(entry, mapping, false);
 885		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
 886				shared);
 887		/*
 888		 * Only swap our new entry into the page cache if the current
 889		 * entry is a zero page or an empty entry.  If a normal PTE or
 890		 * PMD entry is already in the cache, we leave it alone.  This
 891		 * means that if we are trying to insert a PTE and the
 892		 * existing entry is a PMD, we will just leave the PMD in the
 893		 * tree and dirty it if necessary.
 894		 */
 895		old = dax_lock_entry(xas, new_entry);
 896		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
 897					DAX_LOCKED));
 898		entry = new_entry;
 899	} else {
 900		xas_load(xas);	/* Walk the xa_state */
 901	}
 902
 903	if (dirty)
 904		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 905
 906	if (write && shared)
 907		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
 908
 909	xas_unlock_irq(xas);
 910	return entry;
 911}
 912
 913static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 914		struct address_space *mapping, void *entry)
 915{
 916	unsigned long pfn, index, count, end;
 917	long ret = 0;
 918	struct vm_area_struct *vma;
 919
 920	/*
 921	 * A page got tagged dirty in DAX mapping? Something is seriously
 922	 * wrong.
 923	 */
 924	if (WARN_ON(!xa_is_value(entry)))
 925		return -EIO;
 926
 927	if (unlikely(dax_is_locked(entry))) {
 928		void *old_entry = entry;
 929
 930		entry = get_unlocked_entry(xas, 0);
 931
 932		/* Entry got punched out / reallocated? */
 933		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 934			goto put_unlocked;
 935		/*
 936		 * Entry got reallocated elsewhere? No need to writeback.
 937		 * We have to compare pfns as we must not bail out due to
 938		 * difference in lockbit or entry type.
 939		 */
 940		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
 941			goto put_unlocked;
 942		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 943					dax_is_zero_entry(entry))) {
 944			ret = -EIO;
 945			goto put_unlocked;
 946		}
 947
 948		/* Another fsync thread may have already done this entry */
 949		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
 950			goto put_unlocked;
 951	}
 952
 953	/* Lock the entry to serialize with page faults */
 954	dax_lock_entry(xas, entry);
 955
 956	/*
 957	 * We can clear the tag now but we have to be careful so that concurrent
 958	 * dax_writeback_one() calls for the same index cannot finish before we
 959	 * actually flush the caches. This is achieved as the calls will look
 960	 * at the entry only under the i_pages lock and once they do that
 961	 * they will see the entry locked and wait for it to unlock.
 962	 */
 963	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
 964	xas_unlock_irq(xas);
 965
 966	/*
 967	 * If dax_writeback_mapping_range() was given a wbc->range_start
 968	 * in the middle of a PMD, the 'index' we use needs to be
 969	 * aligned to the start of the PMD.
 970	 * This allows us to flush for PMD_SIZE and not have to worry about
 971	 * partial PMD writebacks.
 972	 */
 973	pfn = dax_to_pfn(entry);
 974	count = 1UL << dax_entry_order(entry);
 975	index = xas->xa_index & ~(count - 1);
 976	end = index + count - 1;
 977
 978	/* Walk all mappings of a given index of a file and writeprotect them */
 979	i_mmap_lock_read(mapping);
 980	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
 981		pfn_mkclean_range(pfn, count, index, vma);
 982		cond_resched();
 983	}
 984	i_mmap_unlock_read(mapping);
 985
 986	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
 987	/*
 988	 * After we have flushed the cache, we can clear the dirty tag. There
 989	 * cannot be new dirty data in the pfn after the flush has completed as
 990	 * the pfn mappings are writeprotected and fault waits for mapping
 991	 * entry lock.
 992	 */
 993	xas_reset(xas);
 994	xas_lock_irq(xas);
 995	xas_store(xas, entry);
 996	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
 997	dax_wake_entry(xas, entry, WAKE_NEXT);
 998
 999	trace_dax_writeback_one(mapping->host, index, count);
1000	return ret;
1001
1002 put_unlocked:
1003	put_unlocked_entry(xas, entry, WAKE_NEXT);
1004	return ret;
1005}
1006
1007/*
1008 * Flush the mapping to the persistent domain within the byte range of [start,
1009 * end]. This is required by data integrity operations to ensure file data is
1010 * on persistent storage prior to completion of the operation.
1011 */
1012int dax_writeback_mapping_range(struct address_space *mapping,
1013		struct dax_device *dax_dev, struct writeback_control *wbc)
1014{
1015	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1016	struct inode *inode = mapping->host;
1017	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
1018	void *entry;
1019	int ret = 0;
1020	unsigned int scanned = 0;
1021
1022	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1023		return -EIO;
1024
1025	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
1026		return 0;
1027
1028	trace_dax_writeback_range(inode, xas.xa_index, end_index);
1029
1030	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1031
1032	xas_lock_irq(&xas);
1033	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1034		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1035		if (ret < 0) {
1036			mapping_set_error(mapping, ret);
1037			break;
1038		}
1039		if (++scanned % XA_CHECK_SCHED)
1040			continue;
1041
1042		xas_pause(&xas);
1043		xas_unlock_irq(&xas);
1044		cond_resched();
1045		xas_lock_irq(&xas);
1046	}
1047	xas_unlock_irq(&xas);
1048	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1049	return ret;
1050}
1051EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
1052
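/*
 * Editorial sketch of a filesystem ->writepages hook for DAX (modeled on
 * ext4/XFS; the sbi->s_daxdev field name is illustrative):
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping, sbi->s_daxdev,
 *						   wbc);
 *	}
 */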
1053static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1054		size_t size, void **kaddr, pfn_t *pfnp)
1055{
1056	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1057	int id, rc = 0;
1058	long length;
1059
1060	id = dax_read_lock();
1061	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1062				   DAX_ACCESS, kaddr, pfnp);
1063	if (length < 0) {
1064		rc = length;
1065		goto out;
1066	}
1067	if (!pfnp)
1068		goto out_check_addr;
1069	rc = -EINVAL;
1070	if (PFN_PHYS(length) < size)
1071		goto out;
1072	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1073		goto out;
1074	/* For larger pages we need devmap */
1075	if (length > 1 && !pfn_t_devmap(*pfnp))
1076		goto out;
1077	rc = 0;
1078
1079out_check_addr:
1080	if (!kaddr)
1081		goto out;
1082	if (!*kaddr)
1083		rc = -EFAULT;
1084out:
1085	dax_read_unlock(id);
1086	return rc;
1087}
1088
1089/**
1090 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1091 * by copying the data before and after the range to be written.
1092 * @pos:	address to do copy from.
1093 * @length:	size of copy operation.
 1094 * @align_size:	alignment of the copy (either PMD_SIZE or PAGE_SIZE)
1095 * @srcmap:	iomap srcmap
1096 * @daddr:	destination address to copy to.
1097 *
 1098 * This can be called from two places: either during a DAX write fault (page
 1099 * aligned), to copy @length bytes of data to @daddr, or during a normal DAX
 1100 * write operation, when dax_iomap_iter() calls it to copy an unaligned head
 1101 * or tail of the range.  In the latter case the copying of the aligned
 1102 * middle is taken care of by dax_iomap_iter() itself.
1103 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
1104 * area to make sure no old data remains.
1105 */
1106static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
1107		const struct iomap *srcmap, void *daddr)
1108{
1109	loff_t head_off = pos & (align_size - 1);
1110	size_t size = ALIGN(head_off + length, align_size);
1111	loff_t end = pos + length;
1112	loff_t pg_end = round_up(end, align_size);
 1113	/* copy_all is usually true in the page fault case */
1114	bool copy_all = head_off == 0 && end == pg_end;
1115	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
1116	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
1117			 srcmap->type == IOMAP_UNWRITTEN;
 1118	void *saddr = NULL;
1119	int ret = 0;
1120
1121	if (!zero_edge) {
1122		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1123		if (ret)
1124			return ret;
1125	}
1126
1127	if (copy_all) {
1128		if (zero_edge)
1129			memset(daddr, 0, size);
1130		else
1131			ret = copy_mc_to_kernel(daddr, saddr, length);
1132		goto out;
1133	}
1134
1135	/* Copy the head part of the range */
1136	if (head_off) {
1137		if (zero_edge)
1138			memset(daddr, 0, head_off);
1139		else {
1140			ret = copy_mc_to_kernel(daddr, saddr, head_off);
1141			if (ret)
1142				return -EIO;
1143		}
1144	}
1145
1146	/* Copy the tail part of the range */
1147	if (end < pg_end) {
1148		loff_t tail_off = head_off + length;
1149		loff_t tail_len = pg_end - end;
1150
1151		if (zero_edge)
1152			memset(daddr + tail_off, 0, tail_len);
1153		else {
1154			ret = copy_mc_to_kernel(daddr + tail_off,
1155						saddr + tail_off, tail_len);
1156			if (ret)
1157				return -EIO;
1158		}
1159	}
1160out:
1161	if (zero_edge)
1162		dax_flush(srcmap->dax_dev, daddr, size);
1163	return ret ? -EIO : 0;
1164}
1165
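/*
 * Worked example (editorial): pos == 0x1100, length == 0x200 and
 * align_size == PAGE_SIZE (0x1000) give head_off == 0x100, size == 0x1000,
 * end == 0x1300 and pg_end == 0x2000, so the head copy covers
 * daddr[0, 0x100) and the tail copy covers daddr[0x300, 0x1000).
 */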
1166/*
1167 * The user has performed a load from a hole in the file.  Allocating a new
1168 * page in the file would cause excessive storage usage for workloads with
1169 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1170 * If this page is ever written to we will re-fault and change the mapping to
1171 * point to real DAX storage instead.
1172 */
1173static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1174		const struct iomap_iter *iter, void **entry)
1175{
1176	struct inode *inode = iter->inode;
1177	unsigned long vaddr = vmf->address;
1178	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1179	vm_fault_t ret;
1180
1181	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1182
1183	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1184	trace_dax_load_hole(inode, vmf, ret);
1185	return ret;
1186}
1187
1188#ifdef CONFIG_FS_DAX_PMD
1189static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1190		const struct iomap_iter *iter, void **entry)
1191{
1192	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1193	unsigned long pmd_addr = vmf->address & PMD_MASK;
1194	struct vm_area_struct *vma = vmf->vma;
1195	struct inode *inode = mapping->host;
1196	pgtable_t pgtable = NULL;
1197	struct page *zero_page;
1198	spinlock_t *ptl;
1199	pmd_t pmd_entry;
1200	pfn_t pfn;
1201
1202	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1203
1204	if (unlikely(!zero_page))
1205		goto fallback;
1206
1207	pfn = page_to_pfn_t(zero_page);
1208	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1209				  DAX_PMD | DAX_ZERO_PAGE);
1210
1211	if (arch_needs_pgtable_deposit()) {
1212		pgtable = pte_alloc_one(vma->vm_mm);
1213		if (!pgtable)
1214			return VM_FAULT_OOM;
1215	}
1216
1217	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1218	if (!pmd_none(*(vmf->pmd))) {
1219		spin_unlock(ptl);
1220		goto fallback;
1221	}
1222
1223	if (pgtable) {
1224		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1225		mm_inc_nr_ptes(vma->vm_mm);
1226	}
1227	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1228	pmd_entry = pmd_mkhuge(pmd_entry);
1229	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1230	spin_unlock(ptl);
1231	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1232	return VM_FAULT_NOPAGE;
1233
1234fallback:
1235	if (pgtable)
1236		pte_free(vma->vm_mm, pgtable);
1237	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1238	return VM_FAULT_FALLBACK;
1239}
1240#else
1241static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1242		const struct iomap_iter *iter, void **entry)
1243{
1244	return VM_FAULT_FALLBACK;
1245}
1246#endif /* CONFIG_FS_DAX_PMD */
1247
1248static s64 dax_unshare_iter(struct iomap_iter *iter)
1249{
1250	struct iomap *iomap = &iter->iomap;
1251	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1252	loff_t pos = iter->pos;
1253	loff_t length = iomap_length(iter);
1254	int id = 0;
1255	s64 ret = 0;
1256	void *daddr = NULL, *saddr = NULL;
1257
1258	/* don't bother with blocks that are not shared to start with */
1259	if (!(iomap->flags & IOMAP_F_SHARED))
1260		return length;
1261	/* don't bother with holes or unwritten extents */
1262	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1263		return length;
1264
1265	id = dax_read_lock();
1266	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
1267	if (ret < 0)
1268		goto out_unlock;
1269
1270	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
1271	if (ret < 0)
1272		goto out_unlock;
1273
1274	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
1275		ret = length;
1276	else
1277		ret = -EIO;
1278
1279out_unlock:
1280	dax_read_unlock(id);
1281	return ret;
1282}
1283
1284int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1285		const struct iomap_ops *ops)
1286{
1287	struct iomap_iter iter = {
1288		.inode		= inode,
1289		.pos		= pos,
1290		.len		= len,
1291		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
1292	};
1293	int ret;
1294
1295	while ((ret = iomap_iter(&iter, ops)) > 0)
1296		iter.processed = dax_unshare_iter(&iter);
1297	return ret;
1298}
1299EXPORT_SYMBOL_GPL(dax_file_unshare);
1300
1301static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
1302{
1303	const struct iomap *iomap = &iter->iomap;
1304	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1305	unsigned offset = offset_in_page(pos);
1306	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1307	void *kaddr;
1308	long ret;
1309
1310	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1311				NULL);
1312	if (ret < 0)
1313		return ret;
1314	memset(kaddr + offset, 0, size);
1315	if (iomap->flags & IOMAP_F_SHARED)
1316		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
1317					    kaddr);
1318	else
1319		dax_flush(iomap->dax_dev, kaddr + offset, size);
1320	return ret;
1321}
1322
1323static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1324{
1325	const struct iomap *iomap = &iter->iomap;
1326	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1327	loff_t pos = iter->pos;
1328	u64 length = iomap_length(iter);
1329	s64 written = 0;
1330
1331	/* already zeroed?  we're done. */
1332	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1333		return length;
1334
1335	/*
1336	 * invalidate the pages whose sharing state is to be changed
1337	 * because of CoW.
1338	 */
1339	if (iomap->flags & IOMAP_F_SHARED)
1340		invalidate_inode_pages2_range(iter->inode->i_mapping,
1341					      pos >> PAGE_SHIFT,
1342					      (pos + length - 1) >> PAGE_SHIFT);
1343
1344	do {
1345		unsigned offset = offset_in_page(pos);
1346		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1347		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1348		long rc;
1349		int id;
1350
1351		id = dax_read_lock();
1352		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1353			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1354		else
1355			rc = dax_memzero(iter, pos, size);
1356		dax_read_unlock(id);
1357
1358		if (rc < 0)
1359			return rc;
1360		pos += size;
1361		length -= size;
1362		written += size;
1363	} while (length > 0);
1364
1365	if (did_zero)
1366		*did_zero = true;
1367	return written;
1368}
1369
1370int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1371		const struct iomap_ops *ops)
1372{
1373	struct iomap_iter iter = {
1374		.inode		= inode,
1375		.pos		= pos,
1376		.len		= len,
1377		.flags		= IOMAP_DAX | IOMAP_ZERO,
1378	};
1379	int ret;
1380
1381	while ((ret = iomap_iter(&iter, ops)) > 0)
1382		iter.processed = dax_zero_iter(&iter, did_zero);
1383	return ret;
1384}
1385EXPORT_SYMBOL_GPL(dax_zero_range);
1386
1387int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1388		const struct iomap_ops *ops)
1389{
1390	unsigned int blocksize = i_blocksize(inode);
1391	unsigned int off = pos & (blocksize - 1);
1392
1393	/* Block boundary? Nothing to do */
1394	if (!off)
1395		return 0;
1396	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1397}
1398EXPORT_SYMBOL_GPL(dax_truncate_page);
1399
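/*
 * Worked example (editorial): with a 4096-byte block size and pos == 5000,
 * off == 904, so dax_zero_range() zeroes the remaining 3192 bytes of the
 * block, i.e. file offsets 5000 through 8191.
 */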
1400static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1401		struct iov_iter *iter)
1402{
1403	const struct iomap *iomap = &iomi->iomap;
1404	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
1405	loff_t length = iomap_length(iomi);
1406	loff_t pos = iomi->pos;
1407	struct dax_device *dax_dev = iomap->dax_dev;
1408	loff_t end = pos + length, done = 0;
1409	bool write = iov_iter_rw(iter) == WRITE;
1410	bool cow = write && iomap->flags & IOMAP_F_SHARED;
1411	ssize_t ret = 0;
1412	size_t xfer;
1413	int id;
1414
1415	if (!write) {
1416		end = min(end, i_size_read(iomi->inode));
1417		if (pos >= end)
1418			return 0;
1419
1420		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1421			return iov_iter_zero(min(length, end - pos), iter);
1422	}
1423
1424	/*
1425	 * In DAX mode, enforce either pure overwrites of written extents, or
1426	 * writes to unwritten extents as part of a copy-on-write operation.
1427	 */
1428	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1429			!(iomap->flags & IOMAP_F_SHARED)))
1430		return -EIO;
1431
1432	/*
1433	 * Write can allocate block for an area which has a hole page mapped
1434	 * into page tables. We have to tear down these mappings so that data
1435	 * written by write(2) is visible in mmap.
1436	 */
1437	if (iomap->flags & IOMAP_F_NEW || cow) {
1438		invalidate_inode_pages2_range(iomi->inode->i_mapping,
1439					      pos >> PAGE_SHIFT,
1440					      (end - 1) >> PAGE_SHIFT);
1441	}
1442
1443	id = dax_read_lock();
1444	while (pos < end) {
1445		unsigned offset = pos & (PAGE_SIZE - 1);
1446		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1447		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1448		ssize_t map_len;
1449		bool recovery = false;
1450		void *kaddr;
1451
1452		if (fatal_signal_pending(current)) {
1453			ret = -EINTR;
1454			break;
1455		}
1456
1457		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1458				DAX_ACCESS, &kaddr, NULL);
1459		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
1460			map_len = dax_direct_access(dax_dev, pgoff,
1461					PHYS_PFN(size), DAX_RECOVERY_WRITE,
1462					&kaddr, NULL);
1463			if (map_len > 0)
1464				recovery = true;
1465		}
1466		if (map_len < 0) {
1467			ret = map_len;
1468			break;
1469		}
1470
1471		if (cow) {
1472			ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1473						    srcmap, kaddr);
1474			if (ret)
1475				break;
1476		}
1477
1478		map_len = PFN_PHYS(map_len);
1479		kaddr += offset;
1480		map_len -= offset;
1481		if (map_len > end - pos)
1482			map_len = end - pos;
1483
1484		if (recovery)
1485			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1486					map_len, iter);
1487		else if (write)
1488			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1489					map_len, iter);
1490		else
1491			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1492					map_len, iter);
1493
1494		pos += xfer;
1495		length -= xfer;
1496		done += xfer;
1497
1498		if (xfer == 0)
1499			ret = -EFAULT;
1500		if (xfer < map_len)
1501			break;
1502	}
1503	dax_read_unlock(id);
1504
1505	return done ? done : ret;
1506}
1507
1508/**
1509 * dax_iomap_rw - Perform I/O to a DAX file
1510 * @iocb:	The control block for this I/O
1511 * @iter:	The addresses to do I/O from or to
1512 * @ops:	iomap ops passed from the file system
1513 *
1514 * This function performs read and write operations to directly mapped
1515 * persistent memory.  The callers needs to take care of read/write exclusion
1516 * and evicting any page cache pages in the region under I/O.
1517 */
1518ssize_t
1519dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1520		const struct iomap_ops *ops)
1521{
1522	struct iomap_iter iomi = {
1523		.inode		= iocb->ki_filp->f_mapping->host,
1524		.pos		= iocb->ki_pos,
1525		.len		= iov_iter_count(iter),
1526		.flags		= IOMAP_DAX,
1527	};
1528	loff_t done = 0;
1529	int ret;
1530
1531	if (!iomi.len)
1532		return 0;
1533
1534	if (iov_iter_rw(iter) == WRITE) {
1535		lockdep_assert_held_write(&iomi.inode->i_rwsem);
1536		iomi.flags |= IOMAP_WRITE;
1537	} else {
1538		lockdep_assert_held(&iomi.inode->i_rwsem);
1539	}
1540
1541	if (iocb->ki_flags & IOCB_NOWAIT)
1542		iomi.flags |= IOMAP_NOWAIT;
1543
1544	while ((ret = iomap_iter(&iomi, ops)) > 0)
1545		iomi.processed = dax_iomap_iter(&iomi, iter);
1546
1547	done = iomi.pos - iocb->ki_pos;
1548	iocb->ki_pos = iomi.pos;
1549	return done ? done : ret;
1550}
1551EXPORT_SYMBOL_GPL(dax_iomap_rw);
1552
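/*
 * Editorial caller sketch (hypothetical filesystem, modeled on the ext4/XFS
 * DAX read paths; foo_iomap_ops is an assumed ops table):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */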
1553static vm_fault_t dax_fault_return(int error)
1554{
1555	if (error == 0)
1556		return VM_FAULT_NOPAGE;
1557	return vmf_error(error);
1558}
1559
1560/*
 1561 * When handling a synchronous page fault and the inode needs an fsync, we
 1562 * can insert the PTE/PMD into the page tables only after that fsync has
 1563 * happened. Skip insertion for now and return the pfn so that the caller can
 1564 * insert it after the fsync is done.
1565 */
1566static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1567{
1568	if (WARN_ON_ONCE(!pfnp))
1569		return VM_FAULT_SIGBUS;
1570	*pfnp = pfn;
1571	return VM_FAULT_NEEDDSYNC;
1572}
1573
1574static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1575		const struct iomap_iter *iter)
1576{
1577	vm_fault_t ret;
1578	int error = 0;
1579
1580	switch (iter->iomap.type) {
1581	case IOMAP_HOLE:
1582	case IOMAP_UNWRITTEN:
1583		clear_user_highpage(vmf->cow_page, vmf->address);
1584		break;
1585	case IOMAP_MAPPED:
1586		error = copy_cow_page_dax(vmf, iter);
1587		break;
1588	default:
1589		WARN_ON_ONCE(1);
1590		error = -EIO;
1591		break;
1592	}
1593
1594	if (error)
1595		return dax_fault_return(error);
1596
1597	__SetPageUptodate(vmf->cow_page);
1598	ret = finish_fault(vmf);
1599	if (!ret)
1600		return VM_FAULT_DONE_COW;
1601	return ret;
1602}
1603
1604/**
1605 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1606 * @vmf:	vm fault instance
1607 * @iter:	iomap iter
1608 * @pfnp:	pfn to be returned
1609 * @xas:	the dax mapping tree of a file
1610 * @entry:	an unlocked dax entry to be inserted
1611 * @pmd:	distinguish whether it is a pmd fault
1612 */
1613static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1614		const struct iomap_iter *iter, pfn_t *pfnp,
1615		struct xa_state *xas, void **entry, bool pmd)
1616{
1617	const struct iomap *iomap = &iter->iomap;
1618	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1619	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1620	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1621	bool write = iter->flags & IOMAP_WRITE;
1622	unsigned long entry_flags = pmd ? DAX_PMD : 0;
1623	int err = 0;
1624	pfn_t pfn;
1625	void *kaddr;
1626
1627	if (!pmd && vmf->cow_page)
1628		return dax_fault_cow_page(vmf, iter);
1629
 1630	/* if we are reading from an UNWRITTEN or HOLE extent, return a hole. */
1631	if (!write &&
1632	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1633		if (!pmd)
1634			return dax_load_hole(xas, vmf, iter, entry);
1635		return dax_pmd_load_hole(xas, vmf, iter, entry);
1636	}
1637
1638	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1639		WARN_ON_ONCE(1);
1640		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1641	}
1642
1643	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1644	if (err)
1645		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1646
1647	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1648
1649	if (write && iomap->flags & IOMAP_F_SHARED) {
1650		err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
1651		if (err)
1652			return dax_fault_return(err);
1653	}
1654
1655	if (dax_fault_is_synchronous(iter, vmf->vma))
1656		return dax_fault_synchronous_pfnp(pfnp, pfn);
1657
1658	/* insert PMD pfn */
1659	if (pmd)
1660		return vmf_insert_pfn_pmd(vmf, pfn, write);
1661
1662	/* insert PTE pfn */
1663	if (write)
1664		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1665	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1666}
1667
1668static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1669			       int *iomap_errp, const struct iomap_ops *ops)
1670{
1671	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1672	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1673	struct iomap_iter iter = {
1674		.inode		= mapping->host,
1675		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
1676		.len		= PAGE_SIZE,
1677		.flags		= IOMAP_DAX | IOMAP_FAULT,
1678	};
1679	vm_fault_t ret = 0;
1680	void *entry;
1681	int error;
1682
1683	trace_dax_pte_fault(iter.inode, vmf, ret);
1684	/*
1685	 * Check whether offset isn't beyond end of file now. Caller is supposed
1686	 * to hold locks serializing us with truncate / punch hole so this is
1687	 * a reliable test.
1688	 */
1689	if (iter.pos >= i_size_read(iter.inode)) {
1690		ret = VM_FAULT_SIGBUS;
1691		goto out;
1692	}
1693
1694	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1695		iter.flags |= IOMAP_WRITE;
1696
1697	entry = grab_mapping_entry(&xas, mapping, 0);
1698	if (xa_is_internal(entry)) {
1699		ret = xa_to_internal(entry);
1700		goto out;
1701	}
1702
1703	/*
1704	 * It is possible, particularly with mixed reads & writes to private
1705	 * mappings, that we have raced with a PMD fault that overlaps with
1706	 * the PTE we need to set up.  If so just return and the fault will be
1707	 * retried.
1708	 */
1709	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1710		ret = VM_FAULT_NOPAGE;
1711		goto unlock_entry;
1712	}
1713
1714	while ((error = iomap_iter(&iter, ops)) > 0) {
1715		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1716			iter.processed = -EIO;	/* fs corruption? */
1717			continue;
1718		}
1719
1720		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1721		if (ret != VM_FAULT_SIGBUS &&
1722		    (iter.iomap.flags & IOMAP_F_NEW)) {
1723			count_vm_event(PGMAJFAULT);
1724			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1725			ret |= VM_FAULT_MAJOR;
1726		}
1727
1728		if (!(ret & VM_FAULT_ERROR))
1729			iter.processed = PAGE_SIZE;
1730	}
1731
1732	if (iomap_errp)
1733		*iomap_errp = error;
1734	if (!ret && error)
1735		ret = dax_fault_return(error);
1736
1737unlock_entry:
1738	dax_unlock_entry(&xas, entry);
1739out:
1740	trace_dax_pte_fault_done(iter.inode, vmf, ret);
1741	return ret;
1742}
1743
1744#ifdef CONFIG_FS_DAX_PMD
1745static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1746		pgoff_t max_pgoff)
1747{
1748	unsigned long pmd_addr = vmf->address & PMD_MASK;
1749	bool write = vmf->flags & FAULT_FLAG_WRITE;
1750
1751	/*
1752	 * Make sure that the faulting address's PMD offset (color) matches
1753	 * the PMD offset from the start of the file.  This is necessary so
1754	 * that a PMD range in the page table overlaps exactly with a PMD
1755	 * range in the page cache.
1756	 */
1757	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1758	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1759		return true;
1760
1761	/* Fall back to PTEs if we're going to COW */
1762	if (write && !(vmf->vma->vm_flags & VM_SHARED))
1763		return true;
1764
1765	/* If the PMD would extend outside the VMA */
1766	if (pmd_addr < vmf->vma->vm_start)
1767		return true;
1768	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1769		return true;
1770
1771	/* If the PMD would extend beyond the file size */
1772	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1773		return true;
1774
1775	return false;
1776}
1777
1778static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1779			       const struct iomap_ops *ops)
1780{
1781	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1782	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1783	struct iomap_iter iter = {
1784		.inode		= mapping->host,
1785		.len		= PMD_SIZE,
1786		.flags		= IOMAP_DAX | IOMAP_FAULT,
1787	};
1788	vm_fault_t ret = VM_FAULT_FALLBACK;
1789	pgoff_t max_pgoff;
1790	void *entry;
1791	int error;
1792
1793	if (vmf->flags & FAULT_FLAG_WRITE)
1794		iter.flags |= IOMAP_WRITE;
1795
1796	/*
1797	 * Check whether offset isn't beyond end of file now. Caller is
1798	 * supposed to hold locks serializing us with truncate / punch hole so
1799	 * this is a reliable test.
1800	 */
1801	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1802
1803	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1804
1805	if (xas.xa_index >= max_pgoff) {
1806		ret = VM_FAULT_SIGBUS;
1807		goto out;
1808	}
1809
1810	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1811		goto fallback;
1812
1813	/*
1814	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1815	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1816	 * entry is already in the array, for instance), it will return
1817	 * VM_FAULT_FALLBACK.
1818	 */
1819	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1820	if (xa_is_internal(entry)) {
1821		ret = xa_to_internal(entry);
1822		goto fallback;
1823	}
1824
1825	/*
1826	 * It is possible, particularly with mixed reads & writes to private
1827	 * mappings, that we have raced with a PTE fault that overlaps with
1828	 * the PMD we need to set up.  If so just return and the fault will be
1829	 * retried.
1830	 */
1831	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1832			!pmd_devmap(*vmf->pmd)) {
1833		ret = 0;
1834		goto unlock_entry;
1835	}
1836
1837	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1838	while ((error = iomap_iter(&iter, ops)) > 0) {
1839		if (iomap_length(&iter) < PMD_SIZE)
1840			continue; /* actually breaks out of the loop */
1841
1842		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1843		if (ret != VM_FAULT_FALLBACK)
1844			iter.processed = PMD_SIZE;
1845	}
1846
1847unlock_entry:
1848	dax_unlock_entry(&xas, entry);
1849fallback:
1850	if (ret == VM_FAULT_FALLBACK) {
1851		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1852		count_vm_event(THP_FAULT_FALLBACK);
1853	}
1854out:
1855	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1856	return ret;
1857}
1858#else
1859static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1860			       const struct iomap_ops *ops)
1861{
1862	return VM_FAULT_FALLBACK;
1863}
1864#endif /* CONFIG_FS_DAX_PMD */
1865
1866/**
1867 * dax_iomap_fault - handle a page fault on a DAX file
1868 * @vmf: The description of the fault
1869 * @pe_size: Size of the page to fault in
1870 * @pfnp: PFN to insert for synchronous faults if fsync is required
1871 * @iomap_errp: Storage for detailed error code in case of error
1872 * @ops: Iomap ops passed from the file system
1873 *
1874 * When a page fault occurs, filesystems may call this helper in
1875 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1876 * has done all the necessary locking for the page fault to proceed
1877 * successfully.
1878 */
1879vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1880		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1881{
1882	switch (pe_size) {
1883	case PE_SIZE_PTE:
1884		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1885	case PE_SIZE_PMD:
1886		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1887	default:
1888		return VM_FAULT_FALLBACK;
1889	}
1890}
1891EXPORT_SYMBOL_GPL(dax_iomap_fault);
1892
1893/*
1894 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1895 * @vmf: The description of the fault
1896 * @pfn: PFN to insert
1897 * @order: Order of entry to insert.
1898 *
1899 * This function inserts a writeable PTE or PMD entry into the page tables
1900 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1901 */
1902static vm_fault_t
1903dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1904{
1905	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1906	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1907	void *entry;
1908	vm_fault_t ret;
1909
1910	xas_lock_irq(&xas);
1911	entry = get_unlocked_entry(&xas, order);
1912	/* Did we race with someone splitting the entry, or similar? */
1913	if (!entry || dax_is_conflict(entry) ||
1914	    (order == 0 && !dax_is_pte_entry(entry))) {
1915		put_unlocked_entry(&xas, entry, WAKE_NEXT);
1916		xas_unlock_irq(&xas);
1917		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1918						      VM_FAULT_NOPAGE);
1919		return VM_FAULT_NOPAGE;
1920	}
1921	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1922	dax_lock_entry(&xas, entry);
1923	xas_unlock_irq(&xas);
1924	if (order == 0)
1925		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1926#ifdef CONFIG_FS_DAX_PMD
1927	else if (order == PMD_ORDER)
1928		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1929#endif
1930	else
1931		ret = VM_FAULT_FALLBACK;
1932	dax_unlock_entry(&xas, entry);
1933	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1934	return ret;
1935}
1936
1937/**
1938 * dax_finish_sync_fault - finish synchronous page fault
1939 * @vmf: The description of the fault
1940 * @pe_size: Size of entry to be inserted
1941 * @pfn: PFN to insert
1942 *
1943 * This function ensures that the file range touched by the page fault is
1944 * stored persistently on the media and handles inserting of appropriate page
1945 * table entry.
1946 */
1947vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1948		enum page_entry_size pe_size, pfn_t pfn)
1949{
1950	int err;
1951	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1952	unsigned int order = pe_order(pe_size);
1953	size_t len = PAGE_SIZE << order;
1954
1955	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1956	if (err)
1957		return VM_FAULT_SIGBUS;
1958	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1959}
1960EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
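/*
 * Usage sketch (editor's illustration, not part of fs/dax.c): a
 * filesystem's huge_fault handler typically dispatches to
 * dax_iomap_fault() and completes synchronous faults with
 * dax_finish_sync_fault().  "example_iomap_ops" and the locking are
 * placeholders for filesystem-specific code:
 *
 *	static vm_fault_t example_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		... take filesystem mmap locks ...
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&example_iomap_ops);
 *		... drop filesystem mmap locks ...
 *
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */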
1961
1962static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
1963		struct iomap_iter *it_dest, u64 len, bool *same)
1964{
1965	const struct iomap *smap = &it_src->iomap;
1966	const struct iomap *dmap = &it_dest->iomap;
1967	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
1968	void *saddr, *daddr;
1969	int id, ret;
1970
1971	len = min(len, min(smap->length, dmap->length));
1972
1973	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
1974		*same = true;
1975		return len;
1976	}
1977
1978	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
1979		*same = false;
1980		return 0;
1981	}
1982
1983	id = dax_read_lock();
1984	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
1985				      &saddr, NULL);
1986	if (ret < 0)
1987		goto out_unlock;
1988
1989	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
1990				      &daddr, NULL);
1991	if (ret < 0)
1992		goto out_unlock;
1993
1994	*same = !memcmp(saddr, daddr, len);
1995	if (!*same)
1996		len = 0;
1997	dax_read_unlock(id);
1998	return len;
1999
2000out_unlock:
2001	dax_read_unlock(id);
2002	return -EIO;
2003}
2004
2005int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
2006		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
2007		const struct iomap_ops *ops)
2008{
2009	struct iomap_iter src_iter = {
2010		.inode		= src,
2011		.pos		= srcoff,
2012		.len		= len,
2013		.flags		= IOMAP_DAX,
2014	};
2015	struct iomap_iter dst_iter = {
2016		.inode		= dst,
2017		.pos		= dstoff,
2018		.len		= len,
2019		.flags		= IOMAP_DAX,
2020	};
2021	int ret, compared = 0;
2022
2023	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
2024	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
2025		compared = dax_range_compare_iter(&src_iter, &dst_iter, len,
2026						  same);
2027		if (compared < 0)
2028			return ret;
2029		src_iter.processed = dst_iter.processed = compared;
2030	}
2031	return ret;
2032}
2033
2034int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2035			      struct file *file_out, loff_t pos_out,
2036			      loff_t *len, unsigned int remap_flags,
2037			      const struct iomap_ops *ops)
2038{
2039	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
2040					       pos_out, len, remap_flags, ops);
2041}
2042EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
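/*
 * Usage sketch (editor's illustration): a DAX-aware filesystem calls this
 * from its ->remap_file_range preparation step in place of
 * generic_remap_file_range_prep(), so the dedupe comparison above reads
 * through DAX mappings instead of the page cache.  "example_iomap_ops" is
 * a placeholder for the filesystem's read iomap ops:
 *
 *	if (IS_DAX(file_inode(file_in)))
 *		ret = dax_remap_file_range_prep(file_in, pos_in,
 *				file_out, pos_out, &len, remap_flags,
 *				&example_iomap_ops);
 *	else
 *		ret = generic_remap_file_range_prep(file_in, pos_in,
 *				file_out, pos_out, &len, remap_flags);
 */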
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dax.c - Direct Access filesystem code
   4 * Copyright (c) 2013-2014 Intel Corporation
   5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   7 */
   8
   9#include <linux/atomic.h>
  10#include <linux/blkdev.h>
  11#include <linux/buffer_head.h>
  12#include <linux/dax.h>
  13#include <linux/fs.h>
  14#include <linux/genhd.h>
  15#include <linux/highmem.h>
  16#include <linux/memcontrol.h>
  17#include <linux/mm.h>
  18#include <linux/mutex.h>
  19#include <linux/pagevec.h>
  20#include <linux/sched.h>
  21#include <linux/sched/signal.h>
  22#include <linux/uio.h>
  23#include <linux/vmstat.h>
  24#include <linux/pfn_t.h>
  25#include <linux/sizes.h>
  26#include <linux/mmu_notifier.h>
  27#include <linux/iomap.h>
  28#include <asm/pgalloc.h>
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/fs_dax.h>
  32
  33static inline unsigned int pe_order(enum page_entry_size pe_size)
  34{
  35	if (pe_size == PE_SIZE_PTE)
  36		return PAGE_SHIFT - PAGE_SHIFT;
  37	if (pe_size == PE_SIZE_PMD)
  38		return PMD_SHIFT - PAGE_SHIFT;
  39	if (pe_size == PE_SIZE_PUD)
  40		return PUD_SHIFT - PAGE_SHIFT;
  41	return ~0;
  42}
  43
  44/* We choose 4096 entries - same as per-zone page wait tables */
  45#define DAX_WAIT_TABLE_BITS 12
  46#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  47
  48/* The 'colour' (ie low bits) within a PMD of a page offset.  */
  49#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  50#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  51
  52/* The order of a PMD entry */
  53#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
  54
  55static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  56
  57static int __init init_dax_wait_table(void)
  58{
  59	int i;
  60
  61	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  62		init_waitqueue_head(wait_table + i);
  63	return 0;
  64}
  65fs_initcall(init_dax_wait_table);
  66
  67/*
  68 * DAX pagecache entries use XArray value entries so they can't be mistaken
  69 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
  70 * and two more to tell us if the entry is a zero page or an empty entry that
  71 * is just used for locking.  In total four special bits.
  72 *
  73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  75 * block allocation.
  76 */
  77#define DAX_SHIFT	(4)
  78#define DAX_LOCKED	(1UL << 0)
  79#define DAX_PMD		(1UL << 1)
  80#define DAX_ZERO_PAGE	(1UL << 2)
  81#define DAX_EMPTY	(1UL << 3)
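/*
 * Encoding example (editor's note): dax_make_entry() below packs the pfn
 * above these four flag bits, so a PMD entry for pfn 0x1000 is stored as
 * xa_mk_value((0x1000 << DAX_SHIFT) | DAX_PMD), and dax_to_pfn() recovers
 * the pfn by shifting the value back down by DAX_SHIFT.
 */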
  82
  83static unsigned long dax_to_pfn(void *entry)
  84{
  85	return xa_to_value(entry) >> DAX_SHIFT;
  86}
  87
  88static void *dax_make_entry(pfn_t pfn, unsigned long flags)
  89{
  90	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
  91}
  92
  93static bool dax_is_locked(void *entry)
  94{
  95	return xa_to_value(entry) & DAX_LOCKED;
  96}
  97
  98static unsigned int dax_entry_order(void *entry)
  99{
 100	if (xa_to_value(entry) & DAX_PMD)
 101		return PMD_ORDER;
 102	return 0;
 103}
 104
 105static unsigned long dax_is_pmd_entry(void *entry)
 106{
 107	return xa_to_value(entry) & DAX_PMD;
 108}
 109
 110static bool dax_is_pte_entry(void *entry)
 111{
 112	return !(xa_to_value(entry) & DAX_PMD);
 113}
 114
 115static int dax_is_zero_entry(void *entry)
 116{
 117	return xa_to_value(entry) & DAX_ZERO_PAGE;
 118}
 119
 120static int dax_is_empty_entry(void *entry)
 121{
 122	return xa_to_value(entry) & DAX_EMPTY;
 123}
 124
 125/*
 126 * true if the entry that was found is of a smaller order than the entry
 127 * we were looking for
 128 */
 129static bool dax_is_conflict(void *entry)
 130{
 131	return entry == XA_RETRY_ENTRY;
 132}
 133
 134/*
 135 * DAX page cache entry locking
 136 */
 137struct exceptional_entry_key {
 138	struct xarray *xa;
 139	pgoff_t entry_start;
 140};
 141
 142struct wait_exceptional_entry_queue {
 143	wait_queue_entry_t wait;
 144	struct exceptional_entry_key key;
 145};
 146
 147static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
 148		void *entry, struct exceptional_entry_key *key)
 149{
 150	unsigned long hash;
 151	unsigned long index = xas->xa_index;
 152
 153	/*
 154	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 155	 * queue to the start of that PMD.  This ensures that all offsets in
 156	 * the range covered by the PMD map to the same bit lock.
 157	 */
 158	if (dax_is_pmd_entry(entry))
 159		index &= ~PG_PMD_COLOUR;
 160	key->xa = xas->xa;
 161	key->entry_start = index;
 162
 163	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
 164	return wait_table + hash;
 165}
 166
 167static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
 168		unsigned int mode, int sync, void *keyp)
 169{
 170	struct exceptional_entry_key *key = keyp;
 171	struct wait_exceptional_entry_queue *ewait =
 172		container_of(wait, struct wait_exceptional_entry_queue, wait);
 173
 174	if (key->xa != ewait->key.xa ||
 175	    key->entry_start != ewait->key.entry_start)
 176		return 0;
 177	return autoremove_wake_function(wait, mode, sync, NULL);
 178}
 179
 180/*
 181 * @entry may no longer be the entry at the index in the mapping.
 182 * The important information it's conveying is whether the entry at
 183 * this index used to be a PMD entry.
 184 */
 185static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
 186{
 187	struct exceptional_entry_key key;
 188	wait_queue_head_t *wq;
 189
 190	wq = dax_entry_waitqueue(xas, entry, &key);
 191
 192	/*
 193	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 194	 * under the i_pages lock, ditto for entry handling in our callers.
 195	 * So at this point all tasks that could have seen our entry locked
 196	 * must be in the waitqueue and the following check will see them.
 197	 */
 198	if (waitqueue_active(wq))
 199		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 200}
 201
 202/*
 203 * Look up entry in page cache, wait for it to become unlocked if it
 204 * is a DAX entry and return it.  The caller must subsequently call
 205 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 206 * if it did.  The entry returned may have a larger order than @order.
 207 * If @order is larger than the order of the entry found in i_pages, this
 208 * function returns a dax_is_conflict entry.
 209 *
 210 * Must be called with the i_pages lock held.
 211 */
 212static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 213{
 214	void *entry;
 215	struct wait_exceptional_entry_queue ewait;
 216	wait_queue_head_t *wq;
 217
 218	init_wait(&ewait.wait);
 219	ewait.wait.func = wake_exceptional_entry_func;
 220
 221	for (;;) {
 222		entry = xas_find_conflict(xas);
 223		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 224			return entry;
 225		if (dax_entry_order(entry) < order)
 226			return XA_RETRY_ENTRY;
 227		if (!dax_is_locked(entry))
 228			return entry;
 229
 230		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 231		prepare_to_wait_exclusive(wq, &ewait.wait,
 232					  TASK_UNINTERRUPTIBLE);
 233		xas_unlock_irq(xas);
 234		xas_reset(xas);
 235		schedule();
 236		finish_wait(wq, &ewait.wait);
 237		xas_lock_irq(xas);
 238	}
 239}
 240
 241/*
 242 * The only thing keeping the address space around is the i_pages lock
 243 * (it's cycled in clear_inode() after removing the entries from i_pages).
 244 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 245 */
 246static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 247{
 248	struct wait_exceptional_entry_queue ewait;
 249	wait_queue_head_t *wq;
 250
 251	init_wait(&ewait.wait);
 252	ewait.wait.func = wake_exceptional_entry_func;
 253
 254	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 255	/*
 256	 * Unlike get_unlocked_entry() there is no guarantee that this
 257	 * path ever successfully retrieves an unlocked entry before an
 258	 * inode dies. Perform a non-exclusive wait in case this path
 259	 * never successfully performs its own wake up.
 260	 */
 261	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 262	xas_unlock_irq(xas);
 263	schedule();
 264	finish_wait(wq, &ewait.wait);
 265}
 266
 267static void put_unlocked_entry(struct xa_state *xas, void *entry)
 268{
 269	/* If we were the only waiter woken, wake the next one */
 270	if (entry && !dax_is_conflict(entry))
 271		dax_wake_entry(xas, entry, false);
 272}
 273
 274/*
 275 * We used the xa_state to get the entry, but then we locked the entry and
 276 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 277 * before use.
 278 */
 279static void dax_unlock_entry(struct xa_state *xas, void *entry)
 280{
 281	void *old;
 282
 283	BUG_ON(dax_is_locked(entry));
 284	xas_reset(xas);
 285	xas_lock_irq(xas);
 286	old = xas_store(xas, entry);
 287	xas_unlock_irq(xas);
 288	BUG_ON(!dax_is_locked(old));
 289	dax_wake_entry(xas, entry, false);
 290}
 291
 292/*
 293 * Return: The entry stored at this location before it was locked.
 294 */
 295static void *dax_lock_entry(struct xa_state *xas, void *entry)
 296{
 297	unsigned long v = xa_to_value(entry);
 298	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
 299}
 300
 301static unsigned long dax_entry_size(void *entry)
 302{
 303	if (dax_is_zero_entry(entry))
 304		return 0;
 305	else if (dax_is_empty_entry(entry))
 306		return 0;
 307	else if (dax_is_pmd_entry(entry))
 308		return PMD_SIZE;
 309	else
 310		return PAGE_SIZE;
 311}
 312
 313static unsigned long dax_end_pfn(void *entry)
 314{
 315	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 316}
 317
 318/*
 319 * Iterate through all mapped pfns represented by an entry, i.e. skip
 320 * 'empty' and 'zero' entries.
 321 */
 322#define for_each_mapped_pfn(entry, pfn) \
 323	for (pfn = dax_to_pfn(entry); \
 324			pfn < dax_end_pfn(entry); pfn++)
 325
 326/*
 327 * TODO: for reflink+dax we need a way to associate a single page with
 328 * multiple address_space instances at different linear_page_index()
 329 * offsets.
 330 */
 331static void dax_associate_entry(void *entry, struct address_space *mapping,
 332		struct vm_area_struct *vma, unsigned long address)
 333{
 334	unsigned long size = dax_entry_size(entry), pfn, index;
 335	int i = 0;
 336
 337	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 338		return;
 339
 340	index = linear_page_index(vma, address & ~(size - 1));
 341	for_each_mapped_pfn(entry, pfn) {
 342		struct page *page = pfn_to_page(pfn);
 343
 344		WARN_ON_ONCE(page->mapping);
 345		page->mapping = mapping;
 346		page->index = index + i++;
 347	}
 348}
 349
 350static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 351		bool trunc)
 352{
 353	unsigned long pfn;
 354
 355	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 356		return;
 357
 358	for_each_mapped_pfn(entry, pfn) {
 359		struct page *page = pfn_to_page(pfn);
 360
 361		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 362		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 363		page->mapping = NULL;
 364		page->index = 0;
 365	}
 366}
 367
 368static struct page *dax_busy_page(void *entry)
 369{
 370	unsigned long pfn;
 371
 372	for_each_mapped_pfn(entry, pfn) {
 373		struct page *page = pfn_to_page(pfn);
 374
 375		if (page_ref_count(page) > 1)
 376			return page;
 377	}
 378	return NULL;
 379}
 380
 381/*
 382 * dax_lock_page - Lock the DAX entry corresponding to a page
 383 * @page: The page whose entry we want to lock
 384 *
 385 * Context: Process context.
 386 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 387 * not be locked.
 388 */
 389dax_entry_t dax_lock_page(struct page *page)
 390{
 391	XA_STATE(xas, NULL, 0);
 392	void *entry;
 393
 394	/* Ensure page->mapping isn't freed while we look at it */
 395	rcu_read_lock();
 396	for (;;) {
 397		struct address_space *mapping = READ_ONCE(page->mapping);
 398
 399		entry = NULL;
 400		if (!mapping || !dax_mapping(mapping))
 401			break;
 402
 403		/*
 404		 * In the device-dax case there's no need to lock, a
 405		 * struct dev_pagemap pin is sufficient to keep the
 406		 * inode alive, and we assume we have dev_pagemap pin
 407		 * otherwise we would not have a valid pfn_to_page()
 408		 * translation.
 409		 */
 410		entry = (void *)~0UL;
 411		if (S_ISCHR(mapping->host->i_mode))
 412			break;
 413
 414		xas.xa = &mapping->i_pages;
 415		xas_lock_irq(&xas);
 416		if (mapping != page->mapping) {
 417			xas_unlock_irq(&xas);
 418			continue;
 419		}
 420		xas_set(&xas, page->index);
 421		entry = xas_load(&xas);
 422		if (dax_is_locked(entry)) {
 423			rcu_read_unlock();
 424			wait_entry_unlocked(&xas, entry);
 425			rcu_read_lock();
 426			continue;
 427		}
 428		dax_lock_entry(&xas, entry);
 429		xas_unlock_irq(&xas);
 430		break;
 431	}
 432	rcu_read_unlock();
 433	return (dax_entry_t)entry;
 434}
 435
 436void dax_unlock_page(struct page *page, dax_entry_t cookie)
 437{
 438	struct address_space *mapping = page->mapping;
 439	XA_STATE(xas, &mapping->i_pages, page->index);
 440
 441	if (S_ISCHR(mapping->host->i_mode))
 442		return;
 443
 444	dax_unlock_entry(&xas, (void *)cookie);
 445}
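/*
 * Usage sketch (editor's illustration): the memory-failure path pins the
 * entry while it inspects a poisoned pmem page, roughly:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (!cookie)
 *		return -EBUSY;	(the entry could not be locked)
 *	... page->mapping and page->index are stable here ...
 *	dax_unlock_page(page, cookie);
 */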
 446
 447/*
 448 * Find page cache entry at given index. If it is a DAX entry, return it
 449 * with the entry locked. If the page cache doesn't contain an entry at
 450 * that index, add a locked empty entry.
 451 *
 452 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 453 * either return that locked entry or will return VM_FAULT_FALLBACK.
 454 * This will happen if there are any PTE entries within the PMD range
 455 * that we are requesting.
 456 *
 457 * We always favor PTE entries over PMD entries. There isn't a flow where we
 458 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 459 * insertion will fail if it finds any PTE entries already in the tree, and a
 460 * PTE insertion will cause an existing PMD entry to be unmapped and
 461 * downgraded to PTE entries.  This happens for both PMD zero pages as
 462 * well as PMD empty entries.
 463 *
 464 * The exception to this downgrade path is for PMD entries that have
 465 * real storage backing them.  We will leave these real PMD entries in
 466 * the tree, and PTE writes will simply dirty the entire PMD entry.
 467 *
 468 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 469 * persistent memory the benefit is doubtful. We can add that later if we can
 470 * show it helps.
 471 *
 472 * On error, this function does not return an ERR_PTR.  Instead it returns
 473 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 474 * overlap with xarray value entries.
 475 */
 476static void *grab_mapping_entry(struct xa_state *xas,
 477		struct address_space *mapping, unsigned int order)
 478{
 479	unsigned long index = xas->xa_index;
 480	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
 481	void *entry;
 482
 483retry:
 484	xas_lock_irq(xas);
 485	entry = get_unlocked_entry(xas, order);
 486
 487	if (entry) {
 488		if (dax_is_conflict(entry))
 489			goto fallback;
 490		if (!xa_is_value(entry)) {
 491			xas_set_err(xas, -EIO);
 492			goto out_unlock;
 493		}
 494
 495		if (order == 0) {
 496			if (dax_is_pmd_entry(entry) &&
 497			    (dax_is_zero_entry(entry) ||
 498			     dax_is_empty_entry(entry))) {
 499				pmd_downgrade = true;
 500			}
 501		}
 502	}
 503
 504	if (pmd_downgrade) {
 505		/*
 506		 * Make sure 'entry' remains valid while we drop
 507		 * the i_pages lock.
 508		 */
 509		dax_lock_entry(xas, entry);
 510
 511		/*
 512		 * Besides huge zero pages the only other thing that gets
 513		 * downgraded are empty entries which don't need to be
 514		 * unmapped.
 515		 */
 516		if (dax_is_zero_entry(entry)) {
 517			xas_unlock_irq(xas);
 518			unmap_mapping_pages(mapping,
 519					xas->xa_index & ~PG_PMD_COLOUR,
 520					PG_PMD_NR, false);
 521			xas_reset(xas);
 522			xas_lock_irq(xas);
 523		}
 524
 525		dax_disassociate_entry(entry, mapping, false);
 526		xas_store(xas, NULL);	/* undo the PMD join */
 527		dax_wake_entry(xas, entry, true);
 528		mapping->nrexceptional--;
 529		entry = NULL;
 530		xas_set(xas, index);
 531	}
 532
 533	if (entry) {
 534		dax_lock_entry(xas, entry);
 535	} else {
 536		unsigned long flags = DAX_EMPTY;
 537
 538		if (order > 0)
 539			flags |= DAX_PMD;
 540		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 541		dax_lock_entry(xas, entry);
 542		if (xas_error(xas))
 543			goto out_unlock;
 544		mapping->nrexceptional++;
 545	}
 546
 547out_unlock:
 548	xas_unlock_irq(xas);
 549	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 550		goto retry;
 551	if (xas->xa_node == XA_ERROR(-ENOMEM))
 552		return xa_mk_internal(VM_FAULT_OOM);
 553	if (xas_error(xas))
 554		return xa_mk_internal(VM_FAULT_SIGBUS);
 555	return entry;
 556fallback:
 557	xas_unlock_irq(xas);
 558	return xa_mk_internal(VM_FAULT_FALLBACK);
 559}
 560
 561/**
 562 * dax_layout_busy_page - find first pinned page in @mapping
 563 * @mapping: address space to scan for a page with ref count > 1
 564 *
 565 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 566 * 'onlined' to the page allocator so they are considered idle when
 567 * page->count == 1. A filesystem uses this interface to determine if
 568 * any page in the mapping is busy, i.e. for DMA, or other
 569 * get_user_pages() usages.
 570 *
 571 * It is expected that the filesystem is holding locks to block the
 572 * establishment of new mappings in this address_space. I.e. it expects
 573 * to be able to run unmap_mapping_range() and subsequently not race with
 574 * mapping_mapped() becoming true.
 575 */
 576struct page *dax_layout_busy_page(struct address_space *mapping)
 577{
 578	XA_STATE(xas, &mapping->i_pages, 0);
 579	void *entry;
 580	unsigned int scanned = 0;
 581	struct page *page = NULL;
 582
 583	/*
 584	 * In the 'limited' case get_user_pages() for dax is disabled.
 585	 */
 586	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 587		return NULL;
 588
 589	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 590		return NULL;
 591
 592	/*
 593	 * If we race get_user_pages_fast() here either we'll see the
 594	 * elevated page count in the iteration and wait, or
 595	 * get_user_pages_fast() will see that the page it took a reference
 596	 * against is no longer mapped in the page tables and bail to the
 597	 * get_user_pages() slow path.  The slow path is protected by
 598	 * pte_lock() and pmd_lock(). New references are not taken without
 599	 * holding those locks, and unmap_mapping_range() will not zero the
 600	 * pte or pmd without holding the respective lock, so we are
 601	 * guaranteed to either see new references or prevent new
 602	 * references from being established.
 603	 */
 604	unmap_mapping_range(mapping, 0, 0, 0);
 605
 606	xas_lock_irq(&xas);
 607	xas_for_each(&xas, entry, ULONG_MAX) {
 608		if (WARN_ON_ONCE(!xa_is_value(entry)))
 609			continue;
 610		if (unlikely(dax_is_locked(entry)))
 611			entry = get_unlocked_entry(&xas, 0);
 612		if (entry)
 613			page = dax_busy_page(entry);
 614		put_unlocked_entry(&xas, entry);
 615		if (page)
 616			break;
 617		if (++scanned % XA_CHECK_SCHED)
 618			continue;
 619
 620		xas_pause(&xas);
 621		xas_unlock_irq(&xas);
 622		cond_resched();
 623		xas_lock_irq(&xas);
 624	}
 625	xas_unlock_irq(&xas);
 626	return page;
 627}
 628EXPORT_SYMBOL_GPL(dax_layout_busy_page);
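/*
 * Usage sketch (editor's illustration): before truncating or hole-punching
 * a DAX file, a filesystem drains in-flight DMA roughly as follows
 * (modelled on XFS's layout-breaking loop; locking and error handling
 * elided):
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		... drop locks, wait for page_ref_count(page) to drop
 *		    to 1, then retake the locks and rescan ...
 *	}
 */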
 629
 630static int __dax_invalidate_entry(struct address_space *mapping,
 631					  pgoff_t index, bool trunc)
 632{
 633	XA_STATE(xas, &mapping->i_pages, index);
 634	int ret = 0;
 635	void *entry;
 636
 637	xas_lock_irq(&xas);
 638	entry = get_unlocked_entry(&xas, 0);
 639	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 640		goto out;
 641	if (!trunc &&
 642	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 643	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 644		goto out;
 645	dax_disassociate_entry(entry, mapping, trunc);
 646	xas_store(&xas, NULL);
 647	mapping->nrexceptional--;
 648	ret = 1;
 649out:
 650	put_unlocked_entry(&xas, entry);
 651	xas_unlock_irq(&xas);
 652	return ret;
 653}
 654
 655/*
 656 * Delete DAX entry at @index from @mapping.  Wait for it
 657 * to be unlocked before deleting it.
 658 */
 659int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 660{
 661	int ret = __dax_invalidate_entry(mapping, index, true);
 662
 663	/*
 664	 * This gets called from truncate / punch_hole path. As such, the caller
 665	 * must hold locks protecting against concurrent modifications of the
 666	 * page cache (usually fs-private i_mmap_sem for writing). Since the
 667	 * caller has seen a DAX entry for this index, we better find it
 668	 * at that index as well...
 669	 */
 670	WARN_ON_ONCE(!ret);
 671	return ret;
 672}
 673
 674/*
 675 * Invalidate DAX entry if it is clean.
 676 */
 677int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 678				      pgoff_t index)
 679{
 680	return __dax_invalidate_entry(mapping, index, false);
 681}
 682
 683static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
 684			     sector_t sector, struct page *to, unsigned long vaddr)
 685{
 686	void *vto, *kaddr;
 687	pgoff_t pgoff;
 688	long rc;
 689	int id;
 690
 691	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 692	if (rc)
 693		return rc;
 694
 695	id = dax_read_lock();
 696	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
 697	if (rc < 0) {
 698		dax_read_unlock(id);
 699		return rc;
 700	}
 701	vto = kmap_atomic(to);
 702	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
 703	kunmap_atomic(vto);
 704	dax_read_unlock(id);
 705	return 0;
 706}
 707
 708/*
 709 * By this point grab_mapping_entry() has ensured that we have a locked entry
 710 * of the appropriate size so we don't have to worry about downgrading PMDs to
 711 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 712 * already in the tree, we will skip the insertion and just dirty the PMD as
 713 * appropriate.
 714 */
 715static void *dax_insert_entry(struct xa_state *xas,
 716		struct address_space *mapping, struct vm_fault *vmf,
 717		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
 718{
 719	void *new_entry = dax_make_entry(pfn, flags);
 720
 721	if (dirty)
 722		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 723
 724	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
 725		unsigned long index = xas->xa_index;
 726		/* we are replacing a zero page with block mapping */
 727		if (dax_is_pmd_entry(entry))
 728			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 729					PG_PMD_NR, false);
 730		else /* pte entry */
 731			unmap_mapping_pages(mapping, index, 1, false);
 732	}
 733
 734	xas_reset(xas);
 735	xas_lock_irq(xas);
 736	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 737		void *old;
 738
 739		dax_disassociate_entry(entry, mapping, false);
 740		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
 741		/*
 742		 * Only swap our new entry into the page cache if the current
 743		 * entry is a zero page or an empty entry.  If a normal PTE or
 744		 * PMD entry is already in the cache, we leave it alone.  This
 745		 * means that if we are trying to insert a PTE and the
 746		 * existing entry is a PMD, we will just leave the PMD in the
 747		 * tree and dirty it if necessary.
 748		 */
 749		old = dax_lock_entry(xas, new_entry);
 750		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
 751					DAX_LOCKED));
 752		entry = new_entry;
 753	} else {
 754		xas_load(xas);	/* Walk the xa_state */
 755	}
 756
 757	if (dirty)
 758		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 759
 760	xas_unlock_irq(xas);
 761	return entry;
 762}
 763
 764static inline
 765unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 766{
 767	unsigned long address;
 768
 769	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 770	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 771	return address;
 772}
 773
 774/* Walk all mappings of a given index of a file and writeprotect them */
 775static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 776		unsigned long pfn)
 777{
 778	struct vm_area_struct *vma;
 779	pte_t pte, *ptep = NULL;
 780	pmd_t *pmdp = NULL;
 781	spinlock_t *ptl;
 782
 783	i_mmap_lock_read(mapping);
 784	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 785		struct mmu_notifier_range range;
 786		unsigned long address;
 787
 788		cond_resched();
 789
 790		if (!(vma->vm_flags & VM_SHARED))
 791			continue;
 792
 793		address = pgoff_address(index, vma);
 794
 795		/*
 796		 * Note that because we provide the range to follow_pte_pmd(), it
 797		 * will call mmu_notifier_invalidate_range_start() on our behalf
 798		 * before taking any lock.
 799		 */
 800		if (follow_pte_pmd(vma->vm_mm, address, &range,
 801				   &ptep, &pmdp, &ptl))
 802			continue;
 803
 804		/*
 805		 * No need to call mmu_notifier_invalidate_range() as we are
 806		 * downgrading page table protection, not changing it to point
 807		 * to a new page.
 808		 *
 809		 * See Documentation/vm/mmu_notifier.rst
 810		 */
 811		if (pmdp) {
 812#ifdef CONFIG_FS_DAX_PMD
 813			pmd_t pmd;
 814
 815			if (pfn != pmd_pfn(*pmdp))
 816				goto unlock_pmd;
 817			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
 818				goto unlock_pmd;
 819
 820			flush_cache_page(vma, address, pfn);
 821			pmd = pmdp_invalidate(vma, address, pmdp);
 822			pmd = pmd_wrprotect(pmd);
 823			pmd = pmd_mkclean(pmd);
 824			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 825unlock_pmd:
 826#endif
 827			spin_unlock(ptl);
 828		} else {
 829			if (pfn != pte_pfn(*ptep))
 830				goto unlock_pte;
 831			if (!pte_dirty(*ptep) && !pte_write(*ptep))
 832				goto unlock_pte;
 833
 834			flush_cache_page(vma, address, pfn);
 835			pte = ptep_clear_flush(vma, address, ptep);
 836			pte = pte_wrprotect(pte);
 837			pte = pte_mkclean(pte);
 838			set_pte_at(vma->vm_mm, address, ptep, pte);
 839unlock_pte:
 840			pte_unmap_unlock(ptep, ptl);
 841		}
 842
 843		mmu_notifier_invalidate_range_end(&range);
 844	}
 845	i_mmap_unlock_read(mapping);
 846}
 847
 848static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 849		struct address_space *mapping, void *entry)
 850{
 851	unsigned long pfn, index, count;
 852	long ret = 0;
 853
 854	/*
 855	 * A page got tagged dirty in DAX mapping? Something is seriously
 856	 * wrong.
 857	 */
 858	if (WARN_ON(!xa_is_value(entry)))
 859		return -EIO;
 860
 861	if (unlikely(dax_is_locked(entry))) {
 862		void *old_entry = entry;
 863
 864		entry = get_unlocked_entry(xas, 0);
 865
 866		/* Entry got punched out / reallocated? */
 867		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 868			goto put_unlocked;
 869		/*
 870		 * Entry got reallocated elsewhere? No need to writeback.
 871		 * We have to compare pfns as we must not bail out due to
 872		 * difference in lockbit or entry type.
 873		 */
 874		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
 875			goto put_unlocked;
 876		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 877					dax_is_zero_entry(entry))) {
 878			ret = -EIO;
 879			goto put_unlocked;
 880		}
 881
 882		/* Another fsync thread may have already done this entry */
 883		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
 884			goto put_unlocked;
 885	}
 886
 887	/* Lock the entry to serialize with page faults */
 888	dax_lock_entry(xas, entry);
 889
 890	/*
 891	 * We can clear the tag now but we have to be careful so that concurrent
 892	 * dax_writeback_one() calls for the same index cannot finish before we
 893	 * actually flush the caches. This is achieved as the calls will look
 894	 * at the entry only under the i_pages lock and once they do that
 895	 * they will see the entry locked and wait for it to unlock.
 896	 */
 897	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
 898	xas_unlock_irq(xas);
 899
 900	/*
 901	 * If dax_writeback_mapping_range() was given a wbc->range_start
 902	 * in the middle of a PMD, the 'index' we use needs to be
 903	 * aligned to the start of the PMD.
 904	 * This allows us to flush for PMD_SIZE and not have to worry about
 905	 * partial PMD writebacks.
 906	 */
 907	pfn = dax_to_pfn(entry);
 908	count = 1UL << dax_entry_order(entry);
 909	index = xas->xa_index & ~(count - 1);
 910
 911	dax_entry_mkclean(mapping, index, pfn);
 912	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
 913	/*
 914	 * After we have flushed the cache, we can clear the dirty tag. There
 915	 * cannot be new dirty data in the pfn after the flush has completed as
 916	 * the pfn mappings are writeprotected and fault waits for mapping
 917	 * entry lock.
 918	 */
 919	xas_reset(xas);
 920	xas_lock_irq(xas);
 921	xas_store(xas, entry);
 922	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
 923	dax_wake_entry(xas, entry, false);
 924
 925	trace_dax_writeback_one(mapping->host, index, count);
 926	return ret;
 927
 928 put_unlocked:
 929	put_unlocked_entry(xas, entry);
 930	return ret;
 931}
 932
 933/*
 934 * Flush the mapping to the persistent domain within the byte range of [start,
 935 * end]. This is required by data integrity operations to ensure file data is
 936 * on persistent storage prior to completion of the operation.
 937 */
 938int dax_writeback_mapping_range(struct address_space *mapping,
 939		struct dax_device *dax_dev, struct writeback_control *wbc)
 940{
 941	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
 942	struct inode *inode = mapping->host;
 943	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
 944	void *entry;
 945	int ret = 0;
 946	unsigned int scanned = 0;
 947
 948	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 949		return -EIO;
 950
 951	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 952		return 0;
 953
 954	trace_dax_writeback_range(inode, xas.xa_index, end_index);
 955
 956	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
 957
 958	xas_lock_irq(&xas);
 959	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
 960		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
 961		if (ret < 0) {
 962			mapping_set_error(mapping, ret);
 963			break;
 964		}
 965		if (++scanned % XA_CHECK_SCHED)
 966			continue;
 967
 968		xas_pause(&xas);
 969		xas_unlock_irq(&xas);
 970		cond_resched();
 971		xas_lock_irq(&xas);
 972	}
 973	xas_unlock_irq(&xas);
 974	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
 975	return ret;
 976}
 977EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
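/*
 * Usage sketch (editor's illustration): a filesystem wires this up as the
 * ->writepages method of its DAX address_space operations.  "example_sbi"
 * stands in for however the filesystem finds its dax_device:
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				example_sbi(mapping->host->i_sb)->s_daxdev,
 *				wbc);
 *	}
 */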
 978
 979static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 980{
 981	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 982}
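/*
 * Worked example (editor's note): for an extent with iomap->addr == 1MiB
 * and iomap->offset == 0, a fault at pos == 8192 resolves to byte
 * 1048576 + 8192 == 1056768 on the device, i.e. 512-byte sector
 * 1056768 >> 9 == 2064.
 */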
 983
 984static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 985			 pfn_t *pfnp)
 986{
 987	const sector_t sector = dax_iomap_sector(iomap, pos);
 988	pgoff_t pgoff;
 989	int id, rc;
 990	long length;
 991
 992	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 993	if (rc)
 994		return rc;
 995	id = dax_read_lock();
 996	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 997				   NULL, pfnp);
 998	if (length < 0) {
 999		rc = length;
1000		goto out;
1001	}
1002	rc = -EINVAL;
1003	if (PFN_PHYS(length) < size)
1004		goto out;
1005	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1006		goto out;
1007	/* For larger pages we need devmap */
1008	if (length > 1 && !pfn_t_devmap(*pfnp))
1009		goto out;
1010	rc = 0;
1011out:
1012	dax_read_unlock(id);
1013	return rc;
1014}
1015
1016/*
1017 * The user has performed a load from a hole in the file.  Allocating a new
1018 * page in the file would cause excessive storage usage for workloads with
1019 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1020 * If this page is ever written to we will re-fault and change the mapping to
1021 * point to real DAX storage instead.
1022 */
1023static vm_fault_t dax_load_hole(struct xa_state *xas,
1024		struct address_space *mapping, void **entry,
1025		struct vm_fault *vmf)
1026{
1027	struct inode *inode = mapping->host;
1028	unsigned long vaddr = vmf->address;
1029	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1030	vm_fault_t ret;
1031
1032	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1033			DAX_ZERO_PAGE, false);
1034
1035	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1036	trace_dax_load_hole(inode, vmf, ret);
1037	return ret;
1038}
1039
1040int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
1041		   struct iomap *iomap)
1042{
1043	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
1044	pgoff_t pgoff;
1045	long rc, id;
1046	void *kaddr;
1047	bool page_aligned = false;
1048
1049
1050	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
1051	    IS_ALIGNED(size, PAGE_SIZE))
1052		page_aligned = true;
1053
1054	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
1055	if (rc)
1056		return rc;
1057
1058	id = dax_read_lock();
1059
1060	if (page_aligned)
1061		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
1062					 size >> PAGE_SHIFT);
1063	else
1064		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
1065	if (rc < 0) {
1066		dax_read_unlock(id);
1067		return rc;
1068	}
1069
1070	if (!page_aligned) {
1071		memset(kaddr + offset, 0, size);
1072		dax_flush(iomap->dax_dev, kaddr + offset, size);
1073	}
1074	dax_read_unlock(id);
1075	return 0;
1076}
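/*
 * Usage sketch (editor's illustration): filesystems reach this helper via
 * iomap_zero_range(), which dispatches to dax_iomap_zero() for DAX inodes,
 * e.g. when zeroing the partial block at the new EOF during truncate
 * ("example_iomap_ops" is a placeholder):
 *
 *	error = iomap_zero_range(inode, newsize,
 *			PAGE_ALIGN(newsize) - newsize, NULL,
 *			&example_iomap_ops);
 */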
1077
1078static loff_t
1079dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1080		struct iomap *iomap, struct iomap *srcmap)
1081{
1082	struct block_device *bdev = iomap->bdev;
1083	struct dax_device *dax_dev = iomap->dax_dev;
1084	struct iov_iter *iter = data;
1085	loff_t end = pos + length, done = 0;
1086	ssize_t ret = 0;
1087	size_t xfer;
1088	int id;
1089
1090	if (iov_iter_rw(iter) == READ) {
1091		end = min(end, i_size_read(inode));
1092		if (pos >= end)
1093			return 0;
1094
1095		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1096			return iov_iter_zero(min(length, end - pos), iter);
1097	}
1098
1099	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1100		return -EIO;
1101
1102	/*
1103	 * A write can allocate a block for an area which has a hole page mapped
1104	 * into the page tables. We have to tear down these mappings so that data
1105	 * written by write(2) is visible in mmap.
1106	 */
1107	if (iomap->flags & IOMAP_F_NEW) {
1108		invalidate_inode_pages2_range(inode->i_mapping,
1109					      pos >> PAGE_SHIFT,
1110					      (end - 1) >> PAGE_SHIFT);
1111	}
1112
1113	id = dax_read_lock();
1114	while (pos < end) {
1115		unsigned offset = pos & (PAGE_SIZE - 1);
1116		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1117		const sector_t sector = dax_iomap_sector(iomap, pos);
1118		ssize_t map_len;
1119		pgoff_t pgoff;
1120		void *kaddr;
1121
1122		if (fatal_signal_pending(current)) {
1123			ret = -EINTR;
1124			break;
1125		}
1126
1127		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1128		if (ret)
1129			break;
1130
1131		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1132				&kaddr, NULL);
1133		if (map_len < 0) {
1134			ret = map_len;
1135			break;
1136		}
1137
1138		map_len = PFN_PHYS(map_len);
1139		kaddr += offset;
1140		map_len -= offset;
1141		if (map_len > end - pos)
1142			map_len = end - pos;
1143
1144		/*
1145		 * The userspace address for the memory copy has already been
1146		 * validated via access_ok() in either vfs_read() or
1147		 * vfs_write(), depending on which operation we are doing.
1148		 */
1149		if (iov_iter_rw(iter) == WRITE)
1150			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1151					map_len, iter);
1152		else
1153			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1154					map_len, iter);
1155
1156		pos += xfer;
1157		length -= xfer;
1158		done += xfer;
1159
1160		if (xfer == 0)
1161			ret = -EFAULT;
1162		if (xfer < map_len)
1163			break;
1164	}
1165	dax_read_unlock(id);
1166
1167	return done ? done : ret;
1168}
1169
1170/**
1171 * dax_iomap_rw - Perform I/O to a DAX file
1172 * @iocb:	The control block for this I/O
1173 * @iter:	The addresses to do I/O from or to
1174 * @ops:	iomap ops passed from the file system
1175 *
1176 * This function performs read and write operations to directly mapped
1177 * persistent memory.  Callers need to take care of read/write exclusion
1178 * and evicting any page cache pages in the region under I/O.
1179 */
1180ssize_t
1181dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1182		const struct iomap_ops *ops)
1183{
1184	struct address_space *mapping = iocb->ki_filp->f_mapping;
1185	struct inode *inode = mapping->host;
1186	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1187	unsigned flags = 0;
1188
1189	if (iov_iter_rw(iter) == WRITE) {
1190		lockdep_assert_held_write(&inode->i_rwsem);
1191		flags |= IOMAP_WRITE;
1192	} else {
1193		lockdep_assert_held(&inode->i_rwsem);
1194	}
1195
1196	if (iocb->ki_flags & IOCB_NOWAIT)
1197		flags |= IOMAP_NOWAIT;
1198
1199	while (iov_iter_count(iter)) {
1200		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1201				iter, dax_iomap_actor);
1202		if (ret <= 0)
1203			break;
1204		pos += ret;
1205		done += ret;
1206	}
1207
1208	iocb->ki_pos += done;
1209	return done ? done : ret;
1210}
1211EXPORT_SYMBOL_GPL(dax_iomap_rw);
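/*
 * Usage sketch (editor's illustration): a filesystem's ->read_iter for
 * DAX inodes is little more than a locked call into this helper
 * ("example_" names are placeholders):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */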
1212
1213static vm_fault_t dax_fault_return(int error)
1214{
1215	if (error == 0)
1216		return VM_FAULT_NOPAGE;
1217	return vmf_error(error);
1218}
1219
1220/*
1221 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1222 * flushed on write-faults (non-cow), but not read-faults.
1223 */
1224static bool dax_fault_is_synchronous(unsigned long flags,
1225		struct vm_area_struct *vma, struct iomap *iomap)
1226{
1227	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1228		&& (iomap->flags & IOMAP_F_DIRTY);
1229}
1230
1231static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1232			       int *iomap_errp, const struct iomap_ops *ops)
1233{
1234	struct vm_area_struct *vma = vmf->vma;
1235	struct address_space *mapping = vma->vm_file->f_mapping;
1236	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1237	struct inode *inode = mapping->host;
1238	unsigned long vaddr = vmf->address;
1239	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1240	struct iomap iomap = { .type = IOMAP_HOLE };
1241	struct iomap srcmap = { .type = IOMAP_HOLE };
1242	unsigned flags = IOMAP_FAULT;
1243	int error, major = 0;
1244	bool write = vmf->flags & FAULT_FLAG_WRITE;
1245	bool sync;
1246	vm_fault_t ret = 0;
1247	void *entry;
1248	pfn_t pfn;
1249
1250	trace_dax_pte_fault(inode, vmf, ret);
1251	/*
1252	 * Check now that the offset isn't beyond the end of the file. The caller
1253	 * is supposed to hold locks serializing us with truncate / punch hole, so
1254	 * this is a reliable test.
1255	 */
1256	if (pos >= i_size_read(inode)) {
1257		ret = VM_FAULT_SIGBUS;
1258		goto out;
1259	}
1260
1261	if (write && !vmf->cow_page)
1262		flags |= IOMAP_WRITE;
1263
1264	entry = grab_mapping_entry(&xas, mapping, 0);
1265	if (xa_is_internal(entry)) {
1266		ret = xa_to_internal(entry);
1267		goto out;
1268	}
1269
1270	/*
1271	 * It is possible, particularly with mixed reads & writes to private
1272	 * mappings, that we have raced with a PMD fault that overlaps with
1273	 * the PTE we need to set up.  If so just return and the fault will be
1274	 * retried.
1275	 */
1276	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1277		ret = VM_FAULT_NOPAGE;
1278		goto unlock_entry;
1279	}
1280
1281	/*
1282	 * Note that we don't bother to use iomap_apply here: DAX requires
1283	 * the file system block size to equal the page size, which means
1284	 * that we never have to deal with more than a single extent here.
1285	 */
1286	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
1287	if (iomap_errp)
1288		*iomap_errp = error;
1289	if (error) {
1290		ret = dax_fault_return(error);
1291		goto unlock_entry;
1292	}
1293	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1294		error = -EIO;	/* fs corruption? */
1295		goto error_finish_iomap;
1296	}
1297
1298	if (vmf->cow_page) {
1299		sector_t sector = dax_iomap_sector(&iomap, pos);
1300
1301		switch (iomap.type) {
1302		case IOMAP_HOLE:
1303		case IOMAP_UNWRITTEN:
1304			clear_user_highpage(vmf->cow_page, vaddr);
1305			break;
1306		case IOMAP_MAPPED:
1307			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
1308						  sector, vmf->cow_page, vaddr);
1309			break;
1310		default:
1311			WARN_ON_ONCE(1);
1312			error = -EIO;
1313			break;
1314		}
1315
1316		if (error)
1317			goto error_finish_iomap;
1318
1319		__SetPageUptodate(vmf->cow_page);
1320		ret = finish_fault(vmf);
1321		if (!ret)
1322			ret = VM_FAULT_DONE_COW;
1323		goto finish_iomap;
1324	}
1325
1326	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1327
1328	switch (iomap.type) {
1329	case IOMAP_MAPPED:
1330		if (iomap.flags & IOMAP_F_NEW) {
1331			count_vm_event(PGMAJFAULT);
1332			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1333			major = VM_FAULT_MAJOR;
1334		}
1335		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1336		if (error < 0)
1337			goto error_finish_iomap;
1338
1339		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1340						 0, write && !sync);
1341
1342		/*
1343		 * If we are doing synchronous page fault and inode needs fsync,
1344		 * we can insert PTE into page tables only after that happens.
1345		 * Skip insertion for now and return the pfn so that caller can
1346		 * insert it after fsync is done.
1347		 */
1348		if (sync) {
1349			if (WARN_ON_ONCE(!pfnp)) {
1350				error = -EIO;
1351				goto error_finish_iomap;
1352			}
1353			*pfnp = pfn;
1354			ret = VM_FAULT_NEEDDSYNC | major;
1355			goto finish_iomap;
1356		}
1357		trace_dax_insert_mapping(inode, vmf, entry);
1358		if (write)
1359			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1360		else
1361			ret = vmf_insert_mixed(vma, vaddr, pfn);
1362
1363		goto finish_iomap;
1364	case IOMAP_UNWRITTEN:
1365	case IOMAP_HOLE:
1366		if (!write) {
1367			ret = dax_load_hole(&xas, mapping, &entry, vmf);
1368			goto finish_iomap;
1369		}
1370		fallthrough;
1371	default:
1372		WARN_ON_ONCE(1);
1373		error = -EIO;
1374		break;
1375	}
1376
1377 error_finish_iomap:
1378	ret = dax_fault_return(error);
1379 finish_iomap:
1380	if (ops->iomap_end) {
1381		int copied = PAGE_SIZE;
1382
1383		if (ret & VM_FAULT_ERROR)
1384			copied = 0;
1385		/*
1386		 * The fault is done by now and there's no way back (other
1387		 * thread may be already happily using PTE we have installed).
1388		 * Just ignore error from ->iomap_end since we cannot do much
1389		 * with it.
1390		 */
1391		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1392	}
1393 unlock_entry:
1394	dax_unlock_entry(&xas, entry);
1395 out:
1396	trace_dax_pte_fault_done(inode, vmf, ret);
1397	return ret | major;
1398}
1399
1400#ifdef CONFIG_FS_DAX_PMD
1401static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1402		struct iomap *iomap, void **entry)
1403{
1404	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1405	unsigned long pmd_addr = vmf->address & PMD_MASK;
1406	struct vm_area_struct *vma = vmf->vma;
1407	struct inode *inode = mapping->host;
1408	pgtable_t pgtable = NULL;
1409	struct page *zero_page;
1410	spinlock_t *ptl;
1411	pmd_t pmd_entry;
1412	pfn_t pfn;
1413
1414	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1415
1416	if (unlikely(!zero_page))
1417		goto fallback;
1418
1419	pfn = page_to_pfn_t(zero_page);
1420	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1421			DAX_PMD | DAX_ZERO_PAGE, false);
1422
1423	if (arch_needs_pgtable_deposit()) {
1424		pgtable = pte_alloc_one(vma->vm_mm);
1425		if (!pgtable)
1426			return VM_FAULT_OOM;
1427	}
1428
1429	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1430	if (!pmd_none(*(vmf->pmd))) {
1431		spin_unlock(ptl);
1432		goto fallback;
1433	}
1434
1435	if (pgtable) {
1436		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1437		mm_inc_nr_ptes(vma->vm_mm);
1438	}
1439	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1440	pmd_entry = pmd_mkhuge(pmd_entry);
1441	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1442	spin_unlock(ptl);
1443	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1444	return VM_FAULT_NOPAGE;
1445
1446fallback:
1447	if (pgtable)
1448		pte_free(vma->vm_mm, pgtable);
1449	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1450	return VM_FAULT_FALLBACK;
1451}
1452
1453static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1454			       const struct iomap_ops *ops)
1455{
1456	struct vm_area_struct *vma = vmf->vma;
1457	struct address_space *mapping = vma->vm_file->f_mapping;
1458	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1459	unsigned long pmd_addr = vmf->address & PMD_MASK;
1460	bool write = vmf->flags & FAULT_FLAG_WRITE;
1461	bool sync;
1462	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1463	struct inode *inode = mapping->host;
1464	vm_fault_t result = VM_FAULT_FALLBACK;
1465	struct iomap iomap = { .type = IOMAP_HOLE };
1466	struct iomap srcmap = { .type = IOMAP_HOLE };
1467	pgoff_t max_pgoff;
1468	void *entry;
1469	loff_t pos;
1470	int error;
1471	pfn_t pfn;
1472
1473	/*
1474	 * Check now that the offset isn't beyond the end of the file. The
1475	 * caller is supposed to hold locks serializing us with truncate /
1476	 * punch hole, so this is a reliable test.
1477	 */
1478	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1479
1480	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1481
1482	/*
1483	 * Make sure that the faulting address's PMD offset (color) matches
1484	 * the PMD offset from the start of the file.  This is necessary so
1485	 * that a PMD range in the page table overlaps exactly with a PMD
1486	 * range in the page cache.
1487	 */
1488	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1489	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1490		goto fallback;
1491
1492	/* Fall back to PTEs if we're going to COW */
1493	if (write && !(vma->vm_flags & VM_SHARED))
1494		goto fallback;
1495
1496	/* If the PMD would extend outside the VMA */
1497	if (pmd_addr < vma->vm_start)
1498		goto fallback;
1499	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1500		goto fallback;
1501
1502	if (xas.xa_index >= max_pgoff) {
1503		result = VM_FAULT_SIGBUS;
1504		goto out;
1505	}
1506
1507	/* If the PMD would extend beyond the file size */
1508	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1509		goto fallback;
1510
1511	/*
1512	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1513	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1514	 * entry is already in the array, for instance), it will return
1515	 * VM_FAULT_FALLBACK.
1516	 */
1517	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1518	if (xa_is_internal(entry)) {
1519		result = xa_to_internal(entry);
1520		goto fallback;
1521	}
1522
1523	/*
1524	 * It is possible, particularly with mixed reads & writes to private
1525	 * mappings, that we have raced with a PTE fault that overlaps with
1526	 * the PMD we need to set up.  If so just return and the fault will be
1527	 * retried.
1528	 */
1529	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1530			!pmd_devmap(*vmf->pmd)) {
1531		result = 0;
1532		goto unlock_entry;
1533	}
1534
1535	/*
1536	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1537	 * setting up a mapping, so really we're using iomap_begin() as a way
1538	 * to look up our filesystem block.
1539	 */
1540	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1541	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
1542			&srcmap);
1543	if (error)
1544		goto unlock_entry;
1545
1546	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1547		goto finish_iomap;
1548
1549	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1550
1551	switch (iomap.type) {
1552	case IOMAP_MAPPED:
1553		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1554		if (error < 0)
1555			goto finish_iomap;
1556
1557		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1558						DAX_PMD, write && !sync);
1559
1560		/*
1561		 * If we are doing a synchronous page fault and the inode needs
1562		 * fsync, we can insert the PMD into the page tables only after
1563		 * that happens.  Skip the insertion for now and return the pfn so
1564		 * the caller can insert it after fsync (see dax_finish_sync_fault()).
1565		 */
1566		if (sync) {
1567			if (WARN_ON_ONCE(!pfnp))
1568				goto finish_iomap;
1569			*pfnp = pfn;
1570			result = VM_FAULT_NEEDDSYNC;
1571			goto finish_iomap;
1572		}
1573
1574		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1575		result = vmf_insert_pfn_pmd(vmf, pfn, write);
1576		break;
1577	case IOMAP_UNWRITTEN:
1578	case IOMAP_HOLE:
1579		if (WARN_ON_ONCE(write))
1580			break;
1581		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1582		break;
1583	default:
1584		WARN_ON_ONCE(1);
1585		break;
1586	}
1587
1588 finish_iomap:
1589	if (ops->iomap_end) {
1590		int copied = PMD_SIZE;
1591
1592		if (result == VM_FAULT_FALLBACK)
1593			copied = 0;
1594		/*
1595		 * The fault is done by now and there's no way back (another
1596		 * thread may already be happily using the PMD we installed).
1597		 * Just ignore any error from ->iomap_end since we cannot do
1598		 * much with it.
1599		 */
1600		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1601				&iomap);
1602	}
1603 unlock_entry:
1604	dax_unlock_entry(&xas, entry);
1605 fallback:
1606	if (result == VM_FAULT_FALLBACK) {
1607		split_huge_pmd(vma, vmf->pmd, vmf->address);
1608		count_vm_event(THP_FAULT_FALLBACK);
1609	}
1610out:
1611	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1612	return result;
1613}
1614#else
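/* Without CONFIG_FS_DAX_PMD, PMD faults simply fall back to PTE faults. */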
1615static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1616			       const struct iomap_ops *ops)
1617{
1618	return VM_FAULT_FALLBACK;
1619}
1620#endif /* CONFIG_FS_DAX_PMD */
1621
1622/**
1623 * dax_iomap_fault - handle a page fault on a DAX file
1624 * @vmf: The description of the fault
1625 * @pe_size: Size of the page to fault in
1626 * @pfnp: PFN to insert for synchronous faults if fsync is required
1627 * @iomap_errp: Storage for detailed error code in case of error
1628 * @ops: Iomap ops passed from the file system
1629 *
1630 * When a page fault occurs, filesystems may call this helper in
1631 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1632 * has done all the necessary locking for the page fault to proceed
1633 * successfully.
1634 */
1635vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1636		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1637{
1638	switch (pe_size) {
1639	case PE_SIZE_PTE:
1640		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1641	case PE_SIZE_PMD:
1642		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1643	default:
1644		return VM_FAULT_FALLBACK;
1645	}
1646}
1647EXPORT_SYMBOL_GPL(dax_iomap_fault);
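/*
 * Hypothetical usage sketch (not part of fs/dax.c): a filesystem would
 * typically call dax_iomap_fault() from both its ->fault and ->huge_fault
 * vm_operations, passing the entry size the mm layer is attempting.  The
 * names example_fault, example_huge_fault and example_iomap_ops are
 * placeholders; see the ext4 and xfs fault handlers for the real pattern.
 *
 *	static vm_fault_t example_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		(take filesystem locks serializing with truncate here)
 *		return dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&example_iomap_ops);
 *	}
 *
 *	static vm_fault_t example_fault(struct vm_fault *vmf)
 *	{
 *		return example_huge_fault(vmf, PE_SIZE_PTE);
 *	}
 *
 * Synchronous (MAP_SYNC) write faults additionally need the
 * dax_finish_sync_fault() step shown further below.
 */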
1648
1649/*
1650 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1651 * @vmf: The description of the fault
1652 * @pfn: PFN to insert
1653 * @order: Order of entry to insert.
1654 *
1655 * This function inserts a writeable PTE or PMD entry into the page tables
1656 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1657 */
1658static vm_fault_t
1659dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1660{
1661	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1662	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1663	void *entry;
1664	vm_fault_t ret;
1665
1666	xas_lock_irq(&xas);
1667	entry = get_unlocked_entry(&xas, order);
1668	/* Did we race with someone splitting or removing the entry? */
1669	if (!entry || dax_is_conflict(entry) ||
1670	    (order == 0 && !dax_is_pte_entry(entry))) {
1671		put_unlocked_entry(&xas, entry);
1672		xas_unlock_irq(&xas);
1673		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1674						      VM_FAULT_NOPAGE);
1675		return VM_FAULT_NOPAGE;
1676	}
1677	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1678	dax_lock_entry(&xas, entry);
1679	xas_unlock_irq(&xas);
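	/*
	 * Insert a page table entry of the same order as the pagecache
	 * entry: a PTE for order 0, a PMD for PMD_ORDER (when
	 * CONFIG_FS_DAX_PMD is enabled).  Any other order is unexpected
	 * and falls back.
	 */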
1680	if (order == 0)
1681		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1682#ifdef CONFIG_FS_DAX_PMD
1683	else if (order == PMD_ORDER)
1684		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1685#endif
1686	else
1687		ret = VM_FAULT_FALLBACK;
1688	dax_unlock_entry(&xas, entry);
1689	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1690	return ret;
1691}
1692
1693/**
1694 * dax_finish_sync_fault - finish synchronous page fault
1695 * @vmf: The description of the fault
1696 * @pe_size: Size of entry to be inserted
1697 * @pfn: PFN to insert
1698 *
1699 * This function ensures that the file range touched by the page fault is
1700 * stored persistently on the media and handles insertion of the
1701 * appropriate page table entry.
1702 */
1703vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1704		enum page_entry_size pe_size, pfn_t pfn)
1705{
1706	int err;
1707	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1708	unsigned int order = pe_order(pe_size);
1709	size_t len = PAGE_SIZE << order;
1710
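	/*
	 * A datasync (final argument 1) suffices here: it persists the
	 * metadata needed to find the data after a crash, without forcing
	 * out unrelated metadata such as timestamps.
	 */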
1711	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1712	if (err)
1713		return VM_FAULT_SIGBUS;
1714	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1715}
1716EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
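/*
 * Hypothetical usage sketch (not part of fs/dax.c), continuing the example
 * above dax_iomap_fault(): for a write fault on a MAP_SYNC mapping,
 * dax_iomap_fault() returns VM_FAULT_NEEDDSYNC with *pfnp filled in, and
 * the filesystem must persist the metadata before installing the entry:
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
 *	(drop filesystem locks here)
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *	return ret;
 */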