   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dax.c - Direct Access filesystem code
   4 * Copyright (c) 2013-2014 Intel Corporation
   5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   7 */
   8
   9#include <linux/atomic.h>
  10#include <linux/blkdev.h>
  11#include <linux/buffer_head.h>
  12#include <linux/dax.h>
  13#include <linux/fs.h>
  14#include <linux/highmem.h>
  15#include <linux/memcontrol.h>
  16#include <linux/mm.h>
  17#include <linux/mutex.h>
  18#include <linux/pagevec.h>
  19#include <linux/sched.h>
  20#include <linux/sched/signal.h>
  21#include <linux/uio.h>
  22#include <linux/vmstat.h>
  23#include <linux/pfn_t.h>
  24#include <linux/sizes.h>
  25#include <linux/mmu_notifier.h>
  26#include <linux/iomap.h>
  27#include <linux/rmap.h>
  28#include <asm/pgalloc.h>
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/fs_dax.h>
  32
  33/* We choose 4096 entries - same as per-zone page wait tables */
  34#define DAX_WAIT_TABLE_BITS 12
  35#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  36
   37/* The 'colour' (i.e. the low bits) of a page offset within a PMD. */
  38#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  39#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  40
  41static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  42
  43static int __init init_dax_wait_table(void)
  44{
  45	int i;
  46
  47	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  48		init_waitqueue_head(wait_table + i);
  49	return 0;
  50}
  51fs_initcall(init_dax_wait_table);
  52
  53/*
  54 * DAX pagecache entries use XArray value entries so they can't be mistaken
  55 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
  56 * and two more to tell us if the entry is a zero page or an empty entry that
  57 * is just used for locking.  In total four special bits.
  58 *
  59 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  60 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  61 * block allocation.
  62 */
  63#define DAX_SHIFT	(4)
  64#define DAX_LOCKED	(1UL << 0)
  65#define DAX_PMD		(1UL << 1)
  66#define DAX_ZERO_PAGE	(1UL << 2)
  67#define DAX_EMPTY	(1UL << 3)
  68
  69static unsigned long dax_to_pfn(void *entry)
  70{
  71	return xa_to_value(entry) >> DAX_SHIFT;
  72}
  73
  74static void *dax_make_entry(pfn_t pfn, unsigned long flags)
  75{
  76	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
  77}
  78
  79static bool dax_is_locked(void *entry)
  80{
  81	return xa_to_value(entry) & DAX_LOCKED;
  82}
  83
  84static unsigned int dax_entry_order(void *entry)
  85{
  86	if (xa_to_value(entry) & DAX_PMD)
  87		return PMD_ORDER;
  88	return 0;
  89}
  90
  91static unsigned long dax_is_pmd_entry(void *entry)
  92{
  93	return xa_to_value(entry) & DAX_PMD;
  94}
  95
  96static bool dax_is_pte_entry(void *entry)
  97{
  98	return !(xa_to_value(entry) & DAX_PMD);
  99}
 100
 101static int dax_is_zero_entry(void *entry)
 102{
 103	return xa_to_value(entry) & DAX_ZERO_PAGE;
 104}
 105
 106static int dax_is_empty_entry(void *entry)
 107{
 108	return xa_to_value(entry) & DAX_EMPTY;
 109}
 110
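/*
 * Editorial sketch (not part of the upstream file): how the helpers above
 * pack and unpack a DAX entry.  The pfn value here is arbitrary and only
 * used for illustration.
 */
static inline void dax_entry_encoding_example(void)
{
	unsigned long pfn = 0x1234;
	void *entry = dax_make_entry(pfn_to_pfn_t(pfn), DAX_PMD);

	/* The pfn is stored above the four flag bits reserved by DAX_SHIFT. */
	WARN_ON(dax_to_pfn(entry) != pfn);
	WARN_ON(!dax_is_pmd_entry(entry));
	WARN_ON(dax_entry_order(entry) != PMD_ORDER);
}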
 111/*
 112 * true if the entry that was found is of a smaller order than the entry
 113 * we were looking for
 114 */
 115static bool dax_is_conflict(void *entry)
 116{
 117	return entry == XA_RETRY_ENTRY;
 118}
 119
 120/*
 121 * DAX page cache entry locking
 122 */
 123struct exceptional_entry_key {
 124	struct xarray *xa;
 125	pgoff_t entry_start;
 126};
 127
 128struct wait_exceptional_entry_queue {
 129	wait_queue_entry_t wait;
 130	struct exceptional_entry_key key;
 131};
 132
 133/**
 134 * enum dax_wake_mode: waitqueue wakeup behaviour
 135 * @WAKE_ALL: wake all waiters in the waitqueue
 136 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 137 */
 138enum dax_wake_mode {
 139	WAKE_ALL,
 140	WAKE_NEXT,
 141};
 142
 143static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
 144		void *entry, struct exceptional_entry_key *key)
 145{
 146	unsigned long hash;
 147	unsigned long index = xas->xa_index;
 148
 149	/*
 150	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 151	 * queue to the start of that PMD.  This ensures that all offsets in
 152	 * the range covered by the PMD map to the same bit lock.
 153	 */
 154	if (dax_is_pmd_entry(entry))
 155		index &= ~PG_PMD_COLOUR;
 156	key->xa = xas->xa;
 157	key->entry_start = index;
 158
 159	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
 160	return wait_table + hash;
 161}
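/*
 * Editorial example: with 4KiB pages and 2MiB PMDs, PG_PMD_COLOUR is 511
 * (0x1ff).  A wait on a PMD entry at file index 0x213 therefore keys its
 * waitqueue on entry_start 0x200, the same value used by every other offset
 * within that PMD, so all waiters for the huge entry hash to the same
 * bucket of wait_table[].
 */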
 162
 163static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
 164		unsigned int mode, int sync, void *keyp)
 165{
 166	struct exceptional_entry_key *key = keyp;
 167	struct wait_exceptional_entry_queue *ewait =
 168		container_of(wait, struct wait_exceptional_entry_queue, wait);
 169
 170	if (key->xa != ewait->key.xa ||
 171	    key->entry_start != ewait->key.entry_start)
 172		return 0;
 173	return autoremove_wake_function(wait, mode, sync, NULL);
 174}
 175
 176/*
 177 * @entry may no longer be the entry at the index in the mapping.
 178 * The important information it's conveying is whether the entry at
 179 * this index used to be a PMD entry.
 180 */
 181static void dax_wake_entry(struct xa_state *xas, void *entry,
 182			   enum dax_wake_mode mode)
 183{
 184	struct exceptional_entry_key key;
 185	wait_queue_head_t *wq;
 186
 187	wq = dax_entry_waitqueue(xas, entry, &key);
 188
 189	/*
 190	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 191	 * under the i_pages lock, ditto for entry handling in our callers.
 192	 * So at this point all tasks that could have seen our entry locked
 193	 * must be in the waitqueue and the following check will see them.
 194	 */
 195	if (waitqueue_active(wq))
 196		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
 197}
 198
 199/*
 200 * Look up entry in page cache, wait for it to become unlocked if it
 201 * is a DAX entry and return it.  The caller must subsequently call
 202 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 203 * if it did.  The entry returned may have a larger order than @order.
 204 * If @order is larger than the order of the entry found in i_pages, this
 205 * function returns a dax_is_conflict entry.
 206 *
 207 * Must be called with the i_pages lock held.
 208 */
 209static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 210{
 211	void *entry;
 212	struct wait_exceptional_entry_queue ewait;
 213	wait_queue_head_t *wq;
 214
 215	init_wait(&ewait.wait);
 216	ewait.wait.func = wake_exceptional_entry_func;
 217
 218	for (;;) {
 219		entry = xas_find_conflict(xas);
 220		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 221			return entry;
 222		if (dax_entry_order(entry) < order)
 223			return XA_RETRY_ENTRY;
 224		if (!dax_is_locked(entry))
 225			return entry;
 226
 227		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 228		prepare_to_wait_exclusive(wq, &ewait.wait,
 229					  TASK_UNINTERRUPTIBLE);
 230		xas_unlock_irq(xas);
 231		xas_reset(xas);
 232		schedule();
 233		finish_wait(wq, &ewait.wait);
 234		xas_lock_irq(xas);
 235	}
 236}
 237
 238/*
 239 * The only thing keeping the address space around is the i_pages lock
 240 * (it's cycled in clear_inode() after removing the entries from i_pages)
 241 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 242 */
 243static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 244{
 245	struct wait_exceptional_entry_queue ewait;
 246	wait_queue_head_t *wq;
 247
 248	init_wait(&ewait.wait);
 249	ewait.wait.func = wake_exceptional_entry_func;
 250
 251	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 252	/*
 253	 * Unlike get_unlocked_entry() there is no guarantee that this
 254	 * path ever successfully retrieves an unlocked entry before an
 255	 * inode dies. Perform a non-exclusive wait in case this path
 256	 * never successfully performs its own wake up.
 257	 */
 258	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 259	xas_unlock_irq(xas);
 260	schedule();
 261	finish_wait(wq, &ewait.wait);
 262}
 263
 264static void put_unlocked_entry(struct xa_state *xas, void *entry,
 265			       enum dax_wake_mode mode)
 266{
 267	if (entry && !dax_is_conflict(entry))
 268		dax_wake_entry(xas, entry, mode);
 269}
 270
 271/*
 272 * We used the xa_state to get the entry, but then we locked the entry and
 273 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 274 * before use.
 275 */
 276static void dax_unlock_entry(struct xa_state *xas, void *entry)
 277{
 278	void *old;
 279
 280	BUG_ON(dax_is_locked(entry));
 281	xas_reset(xas);
 282	xas_lock_irq(xas);
 283	old = xas_store(xas, entry);
 284	xas_unlock_irq(xas);
 285	BUG_ON(!dax_is_locked(old));
 286	dax_wake_entry(xas, entry, WAKE_NEXT);
 287}
 288
 289/*
 290 * Return: The entry stored at this location before it was locked.
 291 */
 292static void *dax_lock_entry(struct xa_state *xas, void *entry)
 293{
 294	unsigned long v = xa_to_value(entry);
 295	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
 296}
 297
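/*
 * Editorial sketch (not part of the upstream file) of the lookup contract
 * described above get_unlocked_entry(): either lock the entry and later
 * call dax_unlock_entry(), or drop it with put_unlocked_entry().
 */
static inline void dax_entry_locking_example(struct xa_state *xas)
{
	void *entry;

	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, 0);
	if (entry && !dax_is_conflict(entry) && xa_is_value(entry)) {
		dax_lock_entry(xas, entry);
		xas_unlock_irq(xas);
		/* ... work on the locked entry without the i_pages lock ... */
		dax_unlock_entry(xas, entry);
		return;
	}
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	xas_unlock_irq(xas);
}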
 298static unsigned long dax_entry_size(void *entry)
 299{
 300	if (dax_is_zero_entry(entry))
 301		return 0;
 302	else if (dax_is_empty_entry(entry))
 303		return 0;
 304	else if (dax_is_pmd_entry(entry))
 305		return PMD_SIZE;
 306	else
 307		return PAGE_SIZE;
 308}
 309
 310static unsigned long dax_end_pfn(void *entry)
 311{
 312	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 313}
 314
 315/*
 316 * Iterate through all mapped pfns represented by an entry, i.e. skip
 317 * 'empty' and 'zero' entries.
 318 */
 319#define for_each_mapped_pfn(entry, pfn) \
 320	for (pfn = dax_to_pfn(entry); \
 321			pfn < dax_end_pfn(entry); pfn++)
 322
 323static inline bool dax_page_is_shared(struct page *page)
 324{
 325	return page->mapping == PAGE_MAPPING_DAX_SHARED;
 326}
 327
 328/*
  329 * Set page->mapping to PAGE_MAPPING_DAX_SHARED and increase the
  330 * refcount.
 331 */
 332static inline void dax_page_share_get(struct page *page)
 333{
 334	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
 335		/*
 336		 * Reset the index if the page was already mapped
 337		 * regularly before.
 338		 */
 339		if (page->mapping)
 340			page->share = 1;
 341		page->mapping = PAGE_MAPPING_DAX_SHARED;
 342	}
 343	page->share++;
 344}
 345
 346static inline unsigned long dax_page_share_put(struct page *page)
 347{
 348	return --page->share;
 349}
 350
 351/*
  352 * When called from dax_insert_entry(), the shared flag indicates whether
  353 * this entry is shared by multiple files.  If so, set page->mapping to
  354 * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
 355 */
 356static void dax_associate_entry(void *entry, struct address_space *mapping,
 357		struct vm_area_struct *vma, unsigned long address, bool shared)
 358{
 359	unsigned long size = dax_entry_size(entry), pfn, index;
 360	int i = 0;
 361
 362	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 363		return;
 364
 365	index = linear_page_index(vma, address & ~(size - 1));
 366	for_each_mapped_pfn(entry, pfn) {
 367		struct page *page = pfn_to_page(pfn);
 368
 369		if (shared) {
 370			dax_page_share_get(page);
 371		} else {
 372			WARN_ON_ONCE(page->mapping);
 373			page->mapping = mapping;
 374			page->index = index + i++;
 375		}
 376	}
 377}
 378
 379static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 380		bool trunc)
 381{
 382	unsigned long pfn;
 383
 384	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 385		return;
 386
 387	for_each_mapped_pfn(entry, pfn) {
 388		struct page *page = pfn_to_page(pfn);
 389
 390		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 391		if (dax_page_is_shared(page)) {
 392			/* keep the shared flag if this page is still shared */
 393			if (dax_page_share_put(page) > 0)
 394				continue;
 395		} else
 396			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 397		page->mapping = NULL;
 398		page->index = 0;
 399	}
 400}
 401
 402static struct page *dax_busy_page(void *entry)
 403{
 404	unsigned long pfn;
 405
 406	for_each_mapped_pfn(entry, pfn) {
 407		struct page *page = pfn_to_page(pfn);
 408
 409		if (page_ref_count(page) > 1)
 410			return page;
 411	}
 412	return NULL;
 413}
 414
 415/**
 416 * dax_lock_folio - Lock the DAX entry corresponding to a folio
 417 * @folio: The folio whose entry we want to lock
 418 *
 419 * Context: Process context.
 420 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
 421 * not be locked.
 422 */
 423dax_entry_t dax_lock_folio(struct folio *folio)
 424{
 425	XA_STATE(xas, NULL, 0);
 426	void *entry;
 427
 428	/* Ensure folio->mapping isn't freed while we look at it */
 429	rcu_read_lock();
 430	for (;;) {
 431		struct address_space *mapping = READ_ONCE(folio->mapping);
 432
 433		entry = NULL;
 434		if (!mapping || !dax_mapping(mapping))
 435			break;
 436
 437		/*
 438		 * In the device-dax case there's no need to lock, a
 439		 * struct dev_pagemap pin is sufficient to keep the
 440		 * inode alive, and we assume we have dev_pagemap pin
 441		 * otherwise we would not have a valid pfn_to_page()
 442		 * translation.
 443		 */
 444		entry = (void *)~0UL;
 445		if (S_ISCHR(mapping->host->i_mode))
 446			break;
 447
 448		xas.xa = &mapping->i_pages;
 449		xas_lock_irq(&xas);
 450		if (mapping != folio->mapping) {
 451			xas_unlock_irq(&xas);
 452			continue;
 453		}
 454		xas_set(&xas, folio->index);
 455		entry = xas_load(&xas);
 456		if (dax_is_locked(entry)) {
 457			rcu_read_unlock();
 458			wait_entry_unlocked(&xas, entry);
 459			rcu_read_lock();
 460			continue;
 461		}
 462		dax_lock_entry(&xas, entry);
 463		xas_unlock_irq(&xas);
 464		break;
 465	}
 466	rcu_read_unlock();
 467	return (dax_entry_t)entry;
 468}
 469
 470void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
 471{
 472	struct address_space *mapping = folio->mapping;
 473	XA_STATE(xas, &mapping->i_pages, folio->index);
 474
 475	if (S_ISCHR(mapping->host->i_mode))
 476		return;
 477
 478	dax_unlock_entry(&xas, (void *)cookie);
 479}
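/*
 * Editorial sketch: memory-failure handling is the expected user of the
 * pair above -- it pins the entry so the file mapping cannot be torn down
 * while the poisoned pfn is dealt with, roughly:
 *
 *	dax_entry_t cookie = dax_lock_folio(folio);
 *	if (!cookie)
 *		return;			// entry already gone
 *	// ... unmap the folio from every process mapping it ...
 *	dax_unlock_folio(folio, cookie);
 */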
 480
 481/*
 482 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 483 * @mapping: the file's mapping whose entry we want to lock
 484 * @index: the offset within this file
 485 * @page: output the dax page corresponding to this dax entry
 486 *
 487 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 488 * could not be locked.
 489 */
 490dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
 491		struct page **page)
 492{
 493	XA_STATE(xas, NULL, 0);
 494	void *entry;
 495
 496	rcu_read_lock();
 497	for (;;) {
 498		entry = NULL;
 499		if (!dax_mapping(mapping))
 500			break;
 501
 502		xas.xa = &mapping->i_pages;
 503		xas_lock_irq(&xas);
 504		xas_set(&xas, index);
 505		entry = xas_load(&xas);
 506		if (dax_is_locked(entry)) {
 507			rcu_read_unlock();
 508			wait_entry_unlocked(&xas, entry);
 509			rcu_read_lock();
 510			continue;
 511		}
 512		if (!entry ||
 513		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 514			/*
  515			 * Because we look up the entry by the file's mapping
  516			 * and index, it may not have been inserted yet, or it
  517			 * may be a zero/empty entry.  We don't treat this as
  518			 * an error case.  So, return a special value and do
  519			 * not output @page.
 520			 */
 521			entry = (void *)~0UL;
 522		} else {
 523			*page = pfn_to_page(dax_to_pfn(entry));
 524			dax_lock_entry(&xas, entry);
 525		}
 526		xas_unlock_irq(&xas);
 527		break;
 528	}
 529	rcu_read_unlock();
 530	return (dax_entry_t)entry;
 531}
 532
 533void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
 534		dax_entry_t cookie)
 535{
 536	XA_STATE(xas, &mapping->i_pages, index);
 537
 538	if (cookie == ~0UL)
 539		return;
 540
 541	dax_unlock_entry(&xas, (void *)cookie);
 542}
 543
 544/*
 545 * Find page cache entry at given index. If it is a DAX entry, return it
 546 * with the entry locked. If the page cache doesn't contain an entry at
 547 * that index, add a locked empty entry.
 548 *
 549 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 550 * either return that locked entry or will return VM_FAULT_FALLBACK.
 551 * This will happen if there are any PTE entries within the PMD range
 552 * that we are requesting.
 553 *
 554 * We always favor PTE entries over PMD entries. There isn't a flow where we
 555 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 556 * insertion will fail if it finds any PTE entries already in the tree, and a
 557 * PTE insertion will cause an existing PMD entry to be unmapped and
 558 * downgraded to PTE entries.  This happens for both PMD zero pages as
 559 * well as PMD empty entries.
 560 *
 561 * The exception to this downgrade path is for PMD entries that have
 562 * real storage backing them.  We will leave these real PMD entries in
 563 * the tree, and PTE writes will simply dirty the entire PMD entry.
 564 *
 565 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 566 * persistent memory the benefit is doubtful. We can add that later if we can
 567 * show it helps.
 568 *
 569 * On error, this function does not return an ERR_PTR.  Instead it returns
 570 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 571 * overlap with xarray value entries.
 572 */
 573static void *grab_mapping_entry(struct xa_state *xas,
 574		struct address_space *mapping, unsigned int order)
 575{
 576	unsigned long index = xas->xa_index;
 577	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
 578	void *entry;
 579
 580retry:
 581	pmd_downgrade = false;
 582	xas_lock_irq(xas);
 583	entry = get_unlocked_entry(xas, order);
 584
 585	if (entry) {
 586		if (dax_is_conflict(entry))
 587			goto fallback;
 588		if (!xa_is_value(entry)) {
 589			xas_set_err(xas, -EIO);
 590			goto out_unlock;
 591		}
 592
 593		if (order == 0) {
 594			if (dax_is_pmd_entry(entry) &&
 595			    (dax_is_zero_entry(entry) ||
 596			     dax_is_empty_entry(entry))) {
 597				pmd_downgrade = true;
 598			}
 599		}
 600	}
 601
 602	if (pmd_downgrade) {
 603		/*
 604		 * Make sure 'entry' remains valid while we drop
 605		 * the i_pages lock.
 606		 */
 607		dax_lock_entry(xas, entry);
 608
 609		/*
 610		 * Besides huge zero pages the only other thing that gets
 611		 * downgraded are empty entries which don't need to be
 612		 * unmapped.
 613		 */
 614		if (dax_is_zero_entry(entry)) {
 615			xas_unlock_irq(xas);
 616			unmap_mapping_pages(mapping,
 617					xas->xa_index & ~PG_PMD_COLOUR,
 618					PG_PMD_NR, false);
 619			xas_reset(xas);
 620			xas_lock_irq(xas);
 621		}
 622
 623		dax_disassociate_entry(entry, mapping, false);
 624		xas_store(xas, NULL);	/* undo the PMD join */
 625		dax_wake_entry(xas, entry, WAKE_ALL);
 626		mapping->nrpages -= PG_PMD_NR;
 627		entry = NULL;
 628		xas_set(xas, index);
 629	}
 630
 631	if (entry) {
 632		dax_lock_entry(xas, entry);
 633	} else {
 634		unsigned long flags = DAX_EMPTY;
 635
 636		if (order > 0)
 637			flags |= DAX_PMD;
 638		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 639		dax_lock_entry(xas, entry);
 640		if (xas_error(xas))
 641			goto out_unlock;
 642		mapping->nrpages += 1UL << order;
 643	}
 644
 645out_unlock:
 646	xas_unlock_irq(xas);
 647	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 648		goto retry;
 649	if (xas->xa_node == XA_ERROR(-ENOMEM))
 650		return xa_mk_internal(VM_FAULT_OOM);
 651	if (xas_error(xas))
 652		return xa_mk_internal(VM_FAULT_SIGBUS);
 653	return entry;
 654fallback:
 655	xas_unlock_irq(xas);
 656	return xa_mk_internal(VM_FAULT_FALLBACK);
 657}
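/*
 * Editorial sketch (not part of the upstream file): grab_mapping_entry()
 * never returns an ERR_PTR; failures come back as a VM_FAULT code wrapped
 * in an xarray internal entry, which callers unwrap as below (this mirrors
 * dax_iomap_pte_fault() further down).
 */
static inline vm_fault_t dax_grab_entry_example(struct xa_state *xas,
		struct address_space *mapping)
{
	void *entry = grab_mapping_entry(xas, mapping, 0);

	if (xa_is_internal(entry))
		return xa_to_internal(entry);	/* e.g. VM_FAULT_OOM */
	/* ... fault handling with the locked entry ... */
	dax_unlock_entry(xas, entry);
	return 0;
}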
 658
 659/**
 660 * dax_layout_busy_page_range - find first pinned page in @mapping
 661 * @mapping: address space to scan for a page with ref count > 1
 662 * @start: Starting offset. Page containing 'start' is included.
 663 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 664 *       pages from 'start' till the end of file are included.
 665 *
 666 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 667 * 'onlined' to the page allocator so they are considered idle when
 668 * page->count == 1. A filesystem uses this interface to determine if
 669 * any page in the mapping is busy, i.e. for DMA, or other
 670 * get_user_pages() usages.
 671 *
 672 * It is expected that the filesystem is holding locks to block the
 673 * establishment of new mappings in this address_space. I.e. it expects
 674 * to be able to run unmap_mapping_range() and subsequently not race
 675 * mapping_mapped() becoming true.
 676 */
 677struct page *dax_layout_busy_page_range(struct address_space *mapping,
 678					loff_t start, loff_t end)
 679{
 680	void *entry;
 681	unsigned int scanned = 0;
 682	struct page *page = NULL;
 683	pgoff_t start_idx = start >> PAGE_SHIFT;
 684	pgoff_t end_idx;
 685	XA_STATE(xas, &mapping->i_pages, start_idx);
 686
 687	/*
 688	 * In the 'limited' case get_user_pages() for dax is disabled.
 689	 */
 690	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 691		return NULL;
 692
 693	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 694		return NULL;
 695
  696	/* If end == LLONG_MAX, all pages from start till the end of file */
 697	if (end == LLONG_MAX)
 698		end_idx = ULONG_MAX;
 699	else
 700		end_idx = end >> PAGE_SHIFT;
 701	/*
 702	 * If we race get_user_pages_fast() here either we'll see the
 703	 * elevated page count in the iteration and wait, or
 704	 * get_user_pages_fast() will see that the page it took a reference
 705	 * against is no longer mapped in the page tables and bail to the
 706	 * get_user_pages() slow path.  The slow path is protected by
 707	 * pte_lock() and pmd_lock(). New references are not taken without
 708	 * holding those locks, and unmap_mapping_pages() will not zero the
 709	 * pte or pmd without holding the respective lock, so we are
 710	 * guaranteed to either see new references or prevent new
 711	 * references from being established.
 712	 */
 713	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
 714
 715	xas_lock_irq(&xas);
 716	xas_for_each(&xas, entry, end_idx) {
 717		if (WARN_ON_ONCE(!xa_is_value(entry)))
 718			continue;
 719		if (unlikely(dax_is_locked(entry)))
 720			entry = get_unlocked_entry(&xas, 0);
 721		if (entry)
 722			page = dax_busy_page(entry);
 723		put_unlocked_entry(&xas, entry, WAKE_NEXT);
 724		if (page)
 725			break;
 726		if (++scanned % XA_CHECK_SCHED)
 727			continue;
 728
 729		xas_pause(&xas);
 730		xas_unlock_irq(&xas);
 731		cond_resched();
 732		xas_lock_irq(&xas);
 733	}
 734	xas_unlock_irq(&xas);
 735	return page;
 736}
 737EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
 738
 739struct page *dax_layout_busy_page(struct address_space *mapping)
 740{
 741	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
 742}
 743EXPORT_SYMBOL_GPL(dax_layout_busy_page);
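/*
 * Editorial sketch of the filesystem-side pattern these helpers support
 * (loosely modelled on how filesystems break DAX layouts before truncate;
 * the exact wait primitive is up to the caller):
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		// wait for page_ref_count(page) to drop back to 1,
 *		// e.g. via wait_var_event() on the page's refcount,
 *		// then re-scan in case other pages are still busy
 *	}
 */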
 744
 745static int __dax_invalidate_entry(struct address_space *mapping,
 746					  pgoff_t index, bool trunc)
 747{
 748	XA_STATE(xas, &mapping->i_pages, index);
 749	int ret = 0;
 750	void *entry;
 751
 752	xas_lock_irq(&xas);
 753	entry = get_unlocked_entry(&xas, 0);
 754	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 755		goto out;
 756	if (!trunc &&
 757	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 758	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 759		goto out;
 760	dax_disassociate_entry(entry, mapping, trunc);
 761	xas_store(&xas, NULL);
 762	mapping->nrpages -= 1UL << dax_entry_order(entry);
 763	ret = 1;
 764out:
 765	put_unlocked_entry(&xas, entry, WAKE_ALL);
 766	xas_unlock_irq(&xas);
 767	return ret;
 768}
 769
 770static int __dax_clear_dirty_range(struct address_space *mapping,
 771		pgoff_t start, pgoff_t end)
 772{
 773	XA_STATE(xas, &mapping->i_pages, start);
 774	unsigned int scanned = 0;
 775	void *entry;
 776
 777	xas_lock_irq(&xas);
 778	xas_for_each(&xas, entry, end) {
 779		entry = get_unlocked_entry(&xas, 0);
 780		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 781		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 782		put_unlocked_entry(&xas, entry, WAKE_NEXT);
 783
 784		if (++scanned % XA_CHECK_SCHED)
 785			continue;
 786
 787		xas_pause(&xas);
 788		xas_unlock_irq(&xas);
 789		cond_resched();
 790		xas_lock_irq(&xas);
 791	}
 792	xas_unlock_irq(&xas);
 793
 794	return 0;
 795}
 796
 797/*
 798 * Delete DAX entry at @index from @mapping.  Wait for it
 799 * to be unlocked before deleting it.
 800 */
 801int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 802{
 803	int ret = __dax_invalidate_entry(mapping, index, true);
 804
 805	/*
 806	 * This gets called from truncate / punch_hole path. As such, the caller
 807	 * must hold locks protecting against concurrent modifications of the
 808	 * page cache (usually fs-private i_mmap_sem for writing). Since the
 809	 * caller has seen a DAX entry for this index, we better find it
 810	 * at that index as well...
 811	 */
 812	WARN_ON_ONCE(!ret);
 813	return ret;
 814}
 815
 816/*
 817 * Invalidate DAX entry if it is clean.
 818 */
 819int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 820				      pgoff_t index)
 821{
 822	return __dax_invalidate_entry(mapping, index, false);
 823}
 824
 825static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
 826{
 827	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
 828}
 829
 830static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
 831{
 832	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
 833	void *vto, *kaddr;
 834	long rc;
 835	int id;
 836
 837	id = dax_read_lock();
 838	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
 839				&kaddr, NULL);
 840	if (rc < 0) {
 841		dax_read_unlock(id);
 842		return rc;
 843	}
 844	vto = kmap_atomic(vmf->cow_page);
 845	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
 846	kunmap_atomic(vto);
 847	dax_read_unlock(id);
 848	return 0;
 849}
 850
 851/*
 852 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 853 * flushed on write-faults (non-cow), but not read-faults.
 854 */
 855static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
 856		struct vm_area_struct *vma)
 857{
 858	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
 859		(iter->iomap.flags & IOMAP_F_DIRTY);
 860}
 861
 862/*
 863 * By this point grab_mapping_entry() has ensured that we have a locked entry
 864 * of the appropriate size so we don't have to worry about downgrading PMDs to
 865 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 866 * already in the tree, we will skip the insertion and just dirty the PMD as
 867 * appropriate.
 868 */
 869static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 870		const struct iomap_iter *iter, void *entry, pfn_t pfn,
 871		unsigned long flags)
 872{
 873	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 874	void *new_entry = dax_make_entry(pfn, flags);
 875	bool write = iter->flags & IOMAP_WRITE;
 876	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
 877	bool shared = iter->iomap.flags & IOMAP_F_SHARED;
 878
 879	if (dirty)
 880		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 881
 882	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
 883		unsigned long index = xas->xa_index;
 884		/* we are replacing a zero page with block mapping */
 885		if (dax_is_pmd_entry(entry))
 886			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 887					PG_PMD_NR, false);
 888		else /* pte entry */
 889			unmap_mapping_pages(mapping, index, 1, false);
 890	}
 891
 892	xas_reset(xas);
 893	xas_lock_irq(xas);
 894	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 895		void *old;
 896
 897		dax_disassociate_entry(entry, mapping, false);
 898		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
 899				shared);
 900		/*
 901		 * Only swap our new entry into the page cache if the current
 902		 * entry is a zero page or an empty entry.  If a normal PTE or
 903		 * PMD entry is already in the cache, we leave it alone.  This
 904		 * means that if we are trying to insert a PTE and the
 905		 * existing entry is a PMD, we will just leave the PMD in the
 906		 * tree and dirty it if necessary.
 907		 */
 908		old = dax_lock_entry(xas, new_entry);
 909		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
 910					DAX_LOCKED));
 911		entry = new_entry;
 912	} else {
 913		xas_load(xas);	/* Walk the xa_state */
 914	}
 915
 916	if (dirty)
 917		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 918
 919	if (write && shared)
 920		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
 921
 922	xas_unlock_irq(xas);
 923	return entry;
 924}
 925
 926static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 927		struct address_space *mapping, void *entry)
 928{
 929	unsigned long pfn, index, count, end;
 930	long ret = 0;
 931	struct vm_area_struct *vma;
 932
 933	/*
 934	 * A page got tagged dirty in DAX mapping? Something is seriously
 935	 * wrong.
 936	 */
 937	if (WARN_ON(!xa_is_value(entry)))
 938		return -EIO;
 939
 940	if (unlikely(dax_is_locked(entry))) {
 941		void *old_entry = entry;
 942
 943		entry = get_unlocked_entry(xas, 0);
 944
 945		/* Entry got punched out / reallocated? */
 946		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 947			goto put_unlocked;
 948		/*
  949		 * Entry got reallocated elsewhere? No need to write it back.
 950		 * We have to compare pfns as we must not bail out due to
 951		 * difference in lockbit or entry type.
 952		 */
 953		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
 954			goto put_unlocked;
 955		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 956					dax_is_zero_entry(entry))) {
 957			ret = -EIO;
 958			goto put_unlocked;
 959		}
 960
 961		/* Another fsync thread may have already done this entry */
 962		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
 963			goto put_unlocked;
 964	}
 965
 966	/* Lock the entry to serialize with page faults */
 967	dax_lock_entry(xas, entry);
 968
 969	/*
 970	 * We can clear the tag now but we have to be careful so that concurrent
 971	 * dax_writeback_one() calls for the same index cannot finish before we
 972	 * actually flush the caches. This is achieved as the calls will look
 973	 * at the entry only under the i_pages lock and once they do that
 974	 * they will see the entry locked and wait for it to unlock.
 975	 */
 976	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
 977	xas_unlock_irq(xas);
 978
 979	/*
 980	 * If dax_writeback_mapping_range() was given a wbc->range_start
 981	 * in the middle of a PMD, the 'index' we use needs to be
 982	 * aligned to the start of the PMD.
 983	 * This allows us to flush for PMD_SIZE and not have to worry about
 984	 * partial PMD writebacks.
 985	 */
 986	pfn = dax_to_pfn(entry);
 987	count = 1UL << dax_entry_order(entry);
 988	index = xas->xa_index & ~(count - 1);
 989	end = index + count - 1;
 990
 991	/* Walk all mappings of a given index of a file and writeprotect them */
 992	i_mmap_lock_read(mapping);
 993	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
 994		pfn_mkclean_range(pfn, count, index, vma);
 995		cond_resched();
 996	}
 997	i_mmap_unlock_read(mapping);
 998
 999	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
1000	/*
1001	 * After we have flushed the cache, we can clear the dirty tag. There
1002	 * cannot be new dirty data in the pfn after the flush has completed as
1003	 * the pfn mappings are writeprotected and fault waits for mapping
1004	 * entry lock.
1005	 */
1006	xas_reset(xas);
1007	xas_lock_irq(xas);
1008	xas_store(xas, entry);
1009	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
1010	dax_wake_entry(xas, entry, WAKE_NEXT);
1011
1012	trace_dax_writeback_one(mapping->host, index, count);
1013	return ret;
1014
1015 put_unlocked:
1016	put_unlocked_entry(xas, entry, WAKE_NEXT);
1017	return ret;
1018}
1019
1020/*
1021 * Flush the mapping to the persistent domain within the byte range of [start,
1022 * end]. This is required by data integrity operations to ensure file data is
1023 * on persistent storage prior to completion of the operation.
1024 */
1025int dax_writeback_mapping_range(struct address_space *mapping,
1026		struct dax_device *dax_dev, struct writeback_control *wbc)
1027{
1028	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1029	struct inode *inode = mapping->host;
1030	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
1031	void *entry;
1032	int ret = 0;
1033	unsigned int scanned = 0;
1034
1035	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1036		return -EIO;
1037
1038	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
1039		return 0;
1040
1041	trace_dax_writeback_range(inode, xas.xa_index, end_index);
1042
1043	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1044
1045	xas_lock_irq(&xas);
1046	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1047		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1048		if (ret < 0) {
1049			mapping_set_error(mapping, ret);
1050			break;
1051		}
1052		if (++scanned % XA_CHECK_SCHED)
1053			continue;
1054
1055		xas_pause(&xas);
1056		xas_unlock_irq(&xas);
1057		cond_resched();
1058		xas_lock_irq(&xas);
1059	}
1060	xas_unlock_irq(&xas);
1061	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1062	return ret;
1063}
1064EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
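/*
 * Editorial sketch: a DAX-capable filesystem typically wires this helper
 * straight into its ->writepages() method, passing its own dax_device,
 * roughly:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping, dax_dev, wbc);
 *	}
 *
 * where foo_dax_writepages and dax_dev stand in for the filesystem's own
 * method and struct dax_device.
 */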
1065
1066static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1067		size_t size, void **kaddr, pfn_t *pfnp)
1068{
1069	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1070	int id, rc = 0;
1071	long length;
1072
1073	id = dax_read_lock();
1074	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1075				   DAX_ACCESS, kaddr, pfnp);
1076	if (length < 0) {
1077		rc = length;
1078		goto out;
1079	}
1080	if (!pfnp)
1081		goto out_check_addr;
1082	rc = -EINVAL;
1083	if (PFN_PHYS(length) < size)
1084		goto out;
1085	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1086		goto out;
1087	/* For larger pages we need devmap */
1088	if (length > 1 && !pfn_t_devmap(*pfnp))
1089		goto out;
1090	rc = 0;
1091
1092out_check_addr:
1093	if (!kaddr)
1094		goto out;
1095	if (!*kaddr)
1096		rc = -EFAULT;
1097out:
1098	dax_read_unlock(id);
1099	return rc;
1100}
1101
1102/**
1103 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1104 * by copying the data before and after the range to be written.
 1105 * @pos:	offset to copy from.
 1106 * @length:	size of the copy operation.
 1107 * @align_size:	alignment of the copy (either PMD_SIZE or PAGE_SIZE)
1108 * @srcmap:	iomap srcmap
1109 * @daddr:	destination address to copy to.
1110 *
 1111 * This can be called from two places: during a DAX write fault (page
 1112 * aligned), to copy @length bytes of data to @daddr, or during a normal DAX
 1113 * write, where dax_iomap_iter() calls it to copy the unaligned head or tail
 1114 * of the range.  In the latter case the copy of the aligned ranges is taken
 1115 * care of by dax_iomap_iter() itself.
1116 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
1117 * area to make sure no old data remains.
1118 */
1119static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
1120		const struct iomap *srcmap, void *daddr)
1121{
1122	loff_t head_off = pos & (align_size - 1);
1123	size_t size = ALIGN(head_off + length, align_size);
1124	loff_t end = pos + length;
1125	loff_t pg_end = round_up(end, align_size);
 1126	/* copy_all is usually true in the page fault case */
1127	bool copy_all = head_off == 0 && end == pg_end;
1128	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
1129	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
1130			 srcmap->type == IOMAP_UNWRITTEN;
1131	void *saddr = NULL;
1132	int ret = 0;
1133
1134	if (!zero_edge) {
1135		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1136		if (ret)
1137			return dax_mem2blk_err(ret);
1138	}
1139
1140	if (copy_all) {
1141		if (zero_edge)
1142			memset(daddr, 0, size);
1143		else
1144			ret = copy_mc_to_kernel(daddr, saddr, length);
1145		goto out;
1146	}
1147
1148	/* Copy the head part of the range */
1149	if (head_off) {
1150		if (zero_edge)
1151			memset(daddr, 0, head_off);
1152		else {
1153			ret = copy_mc_to_kernel(daddr, saddr, head_off);
1154			if (ret)
1155				return -EIO;
1156		}
1157	}
1158
1159	/* Copy the tail part of the range */
1160	if (end < pg_end) {
1161		loff_t tail_off = head_off + length;
1162		loff_t tail_len = pg_end - end;
1163
1164		if (zero_edge)
1165			memset(daddr + tail_off, 0, tail_len);
1166		else {
1167			ret = copy_mc_to_kernel(daddr + tail_off,
1168						saddr + tail_off, tail_len);
1169			if (ret)
1170				return -EIO;
1171		}
1172	}
1173out:
1174	if (zero_edge)
1175		dax_flush(srcmap->dax_dev, daddr, size);
1176	return ret ? -EIO : 0;
1177}
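/*
 * Editorial worked example for the helper above: with align_size = 4096,
 * pos = 4200 and length = 100, head_off is 104, size is 4096 and pg_end is
 * 8192, so @daddr covers file offsets [4096, 8192).  The head copy fills
 * daddr[0..104), the caller then writes its 100 new bytes, and the tail
 * copy fills daddr[204..4096) (tail_len = 3892).
 */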
1178
1179/*
1180 * The user has performed a load from a hole in the file.  Allocating a new
1181 * page in the file would cause excessive storage usage for workloads with
1182 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1183 * If this page is ever written to we will re-fault and change the mapping to
1184 * point to real DAX storage instead.
1185 */
1186static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1187		const struct iomap_iter *iter, void **entry)
1188{
1189	struct inode *inode = iter->inode;
1190	unsigned long vaddr = vmf->address;
1191	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1192	vm_fault_t ret;
1193
1194	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1195
1196	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1197	trace_dax_load_hole(inode, vmf, ret);
1198	return ret;
1199}
1200
1201#ifdef CONFIG_FS_DAX_PMD
1202static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1203		const struct iomap_iter *iter, void **entry)
1204{
1205	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1206	unsigned long pmd_addr = vmf->address & PMD_MASK;
1207	struct vm_area_struct *vma = vmf->vma;
1208	struct inode *inode = mapping->host;
1209	pgtable_t pgtable = NULL;
1210	struct page *zero_page;
1211	spinlock_t *ptl;
1212	pmd_t pmd_entry;
1213	pfn_t pfn;
1214
1215	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1216
1217	if (unlikely(!zero_page))
1218		goto fallback;
1219
1220	pfn = page_to_pfn_t(zero_page);
1221	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1222				  DAX_PMD | DAX_ZERO_PAGE);
1223
1224	if (arch_needs_pgtable_deposit()) {
1225		pgtable = pte_alloc_one(vma->vm_mm);
1226		if (!pgtable)
1227			return VM_FAULT_OOM;
1228	}
1229
1230	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1231	if (!pmd_none(*(vmf->pmd))) {
1232		spin_unlock(ptl);
1233		goto fallback;
1234	}
1235
1236	if (pgtable) {
1237		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1238		mm_inc_nr_ptes(vma->vm_mm);
1239	}
1240	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1241	pmd_entry = pmd_mkhuge(pmd_entry);
1242	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1243	spin_unlock(ptl);
1244	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1245	return VM_FAULT_NOPAGE;
1246
1247fallback:
1248	if (pgtable)
1249		pte_free(vma->vm_mm, pgtable);
1250	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1251	return VM_FAULT_FALLBACK;
1252}
1253#else
1254static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1255		const struct iomap_iter *iter, void **entry)
1256{
1257	return VM_FAULT_FALLBACK;
1258}
1259#endif /* CONFIG_FS_DAX_PMD */
1260
1261static s64 dax_unshare_iter(struct iomap_iter *iter)
1262{
1263	struct iomap *iomap = &iter->iomap;
1264	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1265	loff_t pos = iter->pos;
1266	loff_t length = iomap_length(iter);
1267	int id = 0;
1268	s64 ret = 0;
1269	void *daddr = NULL, *saddr = NULL;
1270
1271	/* don't bother with blocks that are not shared to start with */
1272	if (!(iomap->flags & IOMAP_F_SHARED))
1273		return length;
1274
1275	id = dax_read_lock();
1276	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
1277	if (ret < 0)
1278		goto out_unlock;
1279
 1280	/* zero the range if srcmap is a HOLE or UNWRITTEN */
1281	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
1282		memset(daddr, 0, length);
1283		dax_flush(iomap->dax_dev, daddr, length);
1284		ret = length;
1285		goto out_unlock;
1286	}
1287
1288	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
1289	if (ret < 0)
1290		goto out_unlock;
1291
1292	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
1293		ret = length;
1294	else
1295		ret = -EIO;
1296
1297out_unlock:
1298	dax_read_unlock(id);
1299	return dax_mem2blk_err(ret);
1300}
1301
1302int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1303		const struct iomap_ops *ops)
1304{
1305	struct iomap_iter iter = {
1306		.inode		= inode,
1307		.pos		= pos,
1308		.len		= len,
1309		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
1310	};
1311	int ret;
1312
1313	while ((ret = iomap_iter(&iter, ops)) > 0)
1314		iter.processed = dax_unshare_iter(&iter);
1315	return ret;
1316}
1317EXPORT_SYMBOL_GPL(dax_file_unshare);
1318
1319static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
1320{
1321	const struct iomap *iomap = &iter->iomap;
1322	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1323	unsigned offset = offset_in_page(pos);
1324	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1325	void *kaddr;
1326	long ret;
1327
1328	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1329				NULL);
1330	if (ret < 0)
1331		return dax_mem2blk_err(ret);
1332
1333	memset(kaddr + offset, 0, size);
1334	if (iomap->flags & IOMAP_F_SHARED)
1335		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
1336					    kaddr);
1337	else
1338		dax_flush(iomap->dax_dev, kaddr + offset, size);
1339	return ret;
1340}
1341
1342static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1343{
1344	const struct iomap *iomap = &iter->iomap;
1345	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1346	loff_t pos = iter->pos;
1347	u64 length = iomap_length(iter);
1348	s64 written = 0;
1349
1350	/* already zeroed?  we're done. */
1351	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1352		return length;
1353
1354	/*
1355	 * invalidate the pages whose sharing state is to be changed
1356	 * because of CoW.
1357	 */
1358	if (iomap->flags & IOMAP_F_SHARED)
1359		invalidate_inode_pages2_range(iter->inode->i_mapping,
1360					      pos >> PAGE_SHIFT,
1361					      (pos + length - 1) >> PAGE_SHIFT);
1362
1363	do {
1364		unsigned offset = offset_in_page(pos);
1365		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1366		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1367		long rc;
1368		int id;
1369
1370		id = dax_read_lock();
1371		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1372			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1373		else
1374			rc = dax_memzero(iter, pos, size);
1375		dax_read_unlock(id);
1376
1377		if (rc < 0)
1378			return rc;
1379		pos += size;
1380		length -= size;
1381		written += size;
1382	} while (length > 0);
1383
1384	if (did_zero)
1385		*did_zero = true;
1386	return written;
1387}
1388
1389int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1390		const struct iomap_ops *ops)
1391{
1392	struct iomap_iter iter = {
1393		.inode		= inode,
1394		.pos		= pos,
1395		.len		= len,
1396		.flags		= IOMAP_DAX | IOMAP_ZERO,
1397	};
1398	int ret;
1399
1400	while ((ret = iomap_iter(&iter, ops)) > 0)
1401		iter.processed = dax_zero_iter(&iter, did_zero);
1402	return ret;
1403}
1404EXPORT_SYMBOL_GPL(dax_zero_range);
1405
1406int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1407		const struct iomap_ops *ops)
1408{
1409	unsigned int blocksize = i_blocksize(inode);
1410	unsigned int off = pos & (blocksize - 1);
1411
1412	/* Block boundary? Nothing to do */
1413	if (!off)
1414		return 0;
1415	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1416}
1417EXPORT_SYMBOL_GPL(dax_truncate_page);
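/*
 * Editorial sketch: a filesystem's truncate path typically calls the helper
 * above to zero the tail of the new last block, e.g.
 *
 *	error = dax_truncate_page(inode, newsize, NULL, &foo_iomap_ops);
 *
 * with &foo_iomap_ops standing in for the filesystem's own iomap_ops and
 * NULL passed because the caller does not care whether zeroing happened.
 */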
1418
1419static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1420		struct iov_iter *iter)
1421{
1422	const struct iomap *iomap = &iomi->iomap;
1423	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
1424	loff_t length = iomap_length(iomi);
1425	loff_t pos = iomi->pos;
1426	struct dax_device *dax_dev = iomap->dax_dev;
1427	loff_t end = pos + length, done = 0;
1428	bool write = iov_iter_rw(iter) == WRITE;
1429	bool cow = write && iomap->flags & IOMAP_F_SHARED;
1430	ssize_t ret = 0;
1431	size_t xfer;
1432	int id;
1433
1434	if (!write) {
1435		end = min(end, i_size_read(iomi->inode));
1436		if (pos >= end)
1437			return 0;
1438
1439		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1440			return iov_iter_zero(min(length, end - pos), iter);
1441	}
1442
1443	/*
1444	 * In DAX mode, enforce either pure overwrites of written extents, or
1445	 * writes to unwritten extents as part of a copy-on-write operation.
1446	 */
1447	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1448			!(iomap->flags & IOMAP_F_SHARED)))
1449		return -EIO;
1450
1451	/*
 1452	 * A write can allocate a block for an area which has a hole page mapped
1453	 * into page tables. We have to tear down these mappings so that data
1454	 * written by write(2) is visible in mmap.
1455	 */
1456	if (iomap->flags & IOMAP_F_NEW || cow) {
1457		/*
 1458		 * The filesystem allows CoW on non-shared extents. The source
 1459		 * extents may have been mmapped and dirtied before. To be able
 1460		 * to invalidate their dax entries, we need to clear the dirty
 1461		 * mark in advance.
1462		 */
1463		if (cow)
1464			__dax_clear_dirty_range(iomi->inode->i_mapping,
1465						pos >> PAGE_SHIFT,
1466						(end - 1) >> PAGE_SHIFT);
1467		invalidate_inode_pages2_range(iomi->inode->i_mapping,
1468					      pos >> PAGE_SHIFT,
1469					      (end - 1) >> PAGE_SHIFT);
1470	}
1471
1472	id = dax_read_lock();
1473	while (pos < end) {
1474		unsigned offset = pos & (PAGE_SIZE - 1);
1475		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1476		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1477		ssize_t map_len;
1478		bool recovery = false;
1479		void *kaddr;
1480
1481		if (fatal_signal_pending(current)) {
1482			ret = -EINTR;
1483			break;
1484		}
1485
1486		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1487				DAX_ACCESS, &kaddr, NULL);
1488		if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
1489			map_len = dax_direct_access(dax_dev, pgoff,
1490					PHYS_PFN(size), DAX_RECOVERY_WRITE,
1491					&kaddr, NULL);
1492			if (map_len > 0)
1493				recovery = true;
1494		}
1495		if (map_len < 0) {
1496			ret = dax_mem2blk_err(map_len);
1497			break;
1498		}
1499
1500		if (cow) {
1501			ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1502						    srcmap, kaddr);
1503			if (ret)
1504				break;
1505		}
1506
1507		map_len = PFN_PHYS(map_len);
1508		kaddr += offset;
1509		map_len -= offset;
1510		if (map_len > end - pos)
1511			map_len = end - pos;
1512
1513		if (recovery)
1514			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1515					map_len, iter);
1516		else if (write)
1517			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1518					map_len, iter);
1519		else
1520			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1521					map_len, iter);
1522
1523		pos += xfer;
1524		length -= xfer;
1525		done += xfer;
1526
1527		if (xfer == 0)
1528			ret = -EFAULT;
1529		if (xfer < map_len)
1530			break;
1531	}
1532	dax_read_unlock(id);
1533
1534	return done ? done : ret;
1535}
1536
1537/**
1538 * dax_iomap_rw - Perform I/O to a DAX file
1539 * @iocb:	The control block for this I/O
1540 * @iter:	The addresses to do I/O from or to
1541 * @ops:	iomap ops passed from the file system
1542 *
1543 * This function performs read and write operations to directly mapped
 1544 * persistent memory.  The caller needs to take care of read/write exclusion
1545 * and evicting any page cache pages in the region under I/O.
1546 */
1547ssize_t
1548dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1549		const struct iomap_ops *ops)
1550{
1551	struct iomap_iter iomi = {
1552		.inode		= iocb->ki_filp->f_mapping->host,
1553		.pos		= iocb->ki_pos,
1554		.len		= iov_iter_count(iter),
1555		.flags		= IOMAP_DAX,
1556	};
1557	loff_t done = 0;
1558	int ret;
1559
1560	if (!iomi.len)
1561		return 0;
1562
1563	if (iov_iter_rw(iter) == WRITE) {
1564		lockdep_assert_held_write(&iomi.inode->i_rwsem);
1565		iomi.flags |= IOMAP_WRITE;
1566	} else {
1567		lockdep_assert_held(&iomi.inode->i_rwsem);
1568	}
1569
1570	if (iocb->ki_flags & IOCB_NOWAIT)
1571		iomi.flags |= IOMAP_NOWAIT;
1572
1573	while ((ret = iomap_iter(&iomi, ops)) > 0)
1574		iomi.processed = dax_iomap_iter(&iomi, iter);
1575
1576	done = iomi.pos - iocb->ki_pos;
1577	iocb->ki_pos = iomi.pos;
1578	return done ? done : ret;
1579}
1580EXPORT_SYMBOL_GPL(dax_iomap_rw);
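/*
 * Editorial sketch of a read path built on dax_iomap_rw() (names are
 * illustrative; real users such as ext4 and xfs follow the same shape,
 * taking i_rwsem shared to satisfy the lockdep assertion above):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */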
1581
1582static vm_fault_t dax_fault_return(int error)
1583{
1584	if (error == 0)
1585		return VM_FAULT_NOPAGE;
1586	return vmf_error(error);
1587}
1588
1589/*
 1590 * When handling a synchronous page fault and the inode needs an fsync, we can
1591 * insert the PTE/PMD into page tables only after that fsync happened. Skip
1592 * insertion for now and return the pfn so that caller can insert it after the
1593 * fsync is done.
1594 */
1595static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1596{
1597	if (WARN_ON_ONCE(!pfnp))
1598		return VM_FAULT_SIGBUS;
1599	*pfnp = pfn;
1600	return VM_FAULT_NEEDDSYNC;
1601}
1602
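/*
 * Editorial sketch (assumption: dax_finish_sync_fault() is provided later
 * in this file): a filesystem fault handler completes a synchronous fault
 * by syncing and then inserting the pfn it was handed back, roughly:
 *
 *	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &foo_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC) {
 *		// fsync the file range covering the fault, then:
 *		ret = dax_finish_sync_fault(vmf, order, pfn);
 *	}
 */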
1603static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1604		const struct iomap_iter *iter)
1605{
1606	vm_fault_t ret;
1607	int error = 0;
1608
1609	switch (iter->iomap.type) {
1610	case IOMAP_HOLE:
1611	case IOMAP_UNWRITTEN:
1612		clear_user_highpage(vmf->cow_page, vmf->address);
1613		break;
1614	case IOMAP_MAPPED:
1615		error = copy_cow_page_dax(vmf, iter);
1616		break;
1617	default:
1618		WARN_ON_ONCE(1);
1619		error = -EIO;
1620		break;
1621	}
1622
1623	if (error)
1624		return dax_fault_return(error);
1625
1626	__SetPageUptodate(vmf->cow_page);
1627	ret = finish_fault(vmf);
1628	if (!ret)
1629		return VM_FAULT_DONE_COW;
1630	return ret;
1631}
1632
1633/**
1634 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1635 * @vmf:	vm fault instance
1636 * @iter:	iomap iter
1637 * @pfnp:	pfn to be returned
1638 * @xas:	the dax mapping tree of a file
1639 * @entry:	an unlocked dax entry to be inserted
1640 * @pmd:	distinguish whether it is a pmd fault
1641 */
1642static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1643		const struct iomap_iter *iter, pfn_t *pfnp,
1644		struct xa_state *xas, void **entry, bool pmd)
1645{
1646	const struct iomap *iomap = &iter->iomap;
1647	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1648	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1649	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1650	bool write = iter->flags & IOMAP_WRITE;
1651	unsigned long entry_flags = pmd ? DAX_PMD : 0;
1652	int err = 0;
1653	pfn_t pfn;
1654	void *kaddr;
1655
1656	if (!pmd && vmf->cow_page)
1657		return dax_fault_cow_page(vmf, iter);
1658
 1659	/* if we are reading UNWRITTEN or HOLE, return a hole. */
1660	if (!write &&
1661	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1662		if (!pmd)
1663			return dax_load_hole(xas, vmf, iter, entry);
1664		return dax_pmd_load_hole(xas, vmf, iter, entry);
1665	}
1666
1667	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1668		WARN_ON_ONCE(1);
1669		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1670	}
1671
1672	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1673	if (err)
1674		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1675
1676	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1677
1678	if (write && iomap->flags & IOMAP_F_SHARED) {
1679		err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
1680		if (err)
1681			return dax_fault_return(err);
1682	}
1683
1684	if (dax_fault_is_synchronous(iter, vmf->vma))
1685		return dax_fault_synchronous_pfnp(pfnp, pfn);
1686
1687	/* insert PMD pfn */
1688	if (pmd)
1689		return vmf_insert_pfn_pmd(vmf, pfn, write);
1690
1691	/* insert PTE pfn */
1692	if (write)
1693		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1694	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1695}
1696
1697static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1698			       int *iomap_errp, const struct iomap_ops *ops)
1699{
1700	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1701	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1702	struct iomap_iter iter = {
1703		.inode		= mapping->host,
1704		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
1705		.len		= PAGE_SIZE,
1706		.flags		= IOMAP_DAX | IOMAP_FAULT,
1707	};
1708	vm_fault_t ret = 0;
1709	void *entry;
1710	int error;
1711
1712	trace_dax_pte_fault(iter.inode, vmf, ret);
1713	/*
1714	 * Check whether offset isn't beyond end of file now. Caller is supposed
1715	 * to hold locks serializing us with truncate / punch hole so this is
1716	 * a reliable test.
1717	 */
1718	if (iter.pos >= i_size_read(iter.inode)) {
1719		ret = VM_FAULT_SIGBUS;
1720		goto out;
1721	}
1722
1723	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1724		iter.flags |= IOMAP_WRITE;
1725
1726	entry = grab_mapping_entry(&xas, mapping, 0);
1727	if (xa_is_internal(entry)) {
1728		ret = xa_to_internal(entry);
1729		goto out;
1730	}
1731
1732	/*
1733	 * It is possible, particularly with mixed reads & writes to private
1734	 * mappings, that we have raced with a PMD fault that overlaps with
1735	 * the PTE we need to set up.  If so just return and the fault will be
1736	 * retried.
1737	 */
1738	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1739		ret = VM_FAULT_NOPAGE;
1740		goto unlock_entry;
1741	}
1742
1743	while ((error = iomap_iter(&iter, ops)) > 0) {
1744		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1745			iter.processed = -EIO;	/* fs corruption? */
1746			continue;
1747		}
1748
1749		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1750		if (ret != VM_FAULT_SIGBUS &&
1751		    (iter.iomap.flags & IOMAP_F_NEW)) {
1752			count_vm_event(PGMAJFAULT);
1753			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1754			ret |= VM_FAULT_MAJOR;
1755		}
1756
1757		if (!(ret & VM_FAULT_ERROR))
1758			iter.processed = PAGE_SIZE;
1759	}
1760
1761	if (iomap_errp)
1762		*iomap_errp = error;
1763	if (!ret && error)
1764		ret = dax_fault_return(error);
1765
1766unlock_entry:
1767	dax_unlock_entry(&xas, entry);
1768out:
1769	trace_dax_pte_fault_done(iter.inode, vmf, ret);
1770	return ret;
1771}
1772
1773#ifdef CONFIG_FS_DAX_PMD
1774static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1775		pgoff_t max_pgoff)
1776{
1777	unsigned long pmd_addr = vmf->address & PMD_MASK;
1778	bool write = vmf->flags & FAULT_FLAG_WRITE;
1779
1780	/*
1781	 * Make sure that the faulting address's PMD offset (color) matches
1782	 * the PMD offset from the start of the file.  This is necessary so
1783	 * that a PMD range in the page table overlaps exactly with a PMD
1784	 * range in the page cache.
1785	 */
1786	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1787	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1788		return true;
1789
1790	/* Fall back to PTEs if we're going to COW */
1791	if (write && !(vmf->vma->vm_flags & VM_SHARED))
1792		return true;
1793
1794	/* If the PMD would extend outside the VMA */
1795	if (pmd_addr < vmf->vma->vm_start)
1796		return true;
1797	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1798		return true;
1799
1800	/* If the PMD would extend beyond the file size */
1801	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1802		return true;
1803
1804	return false;
1805}
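/*
 * Worked example of the colour check above (illustrative only, assuming the
 * common 4K page / 2M PMD geometry where PG_PMD_COLOUR == 511): a fault at a
 * virtual address whose page-table index has low bits
 * (vmf->address >> PAGE_SHIFT) & 511 == 0x003 can only be served by a PMD if
 * the file page offset has the same colour, i.e. (vmf->pgoff & 511) == 0x003.
 * Otherwise the aligned 512-page range in the page table would not line up
 * with an aligned 512-page range in the page cache, so the helper returns
 * true and the fault falls back to PTEs.
 */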
1806
1807static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1808			       const struct iomap_ops *ops)
1809{
1810	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1811	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1812	struct iomap_iter iter = {
1813		.inode		= mapping->host,
1814		.len		= PMD_SIZE,
1815		.flags		= IOMAP_DAX | IOMAP_FAULT,
1816	};
1817	vm_fault_t ret = VM_FAULT_FALLBACK;
1818	pgoff_t max_pgoff;
1819	void *entry;
1820
1821	if (vmf->flags & FAULT_FLAG_WRITE)
1822		iter.flags |= IOMAP_WRITE;
1823
1824	/*
1825	 * Check whether offset isn't beyond end of file now. Caller is
1826	 * supposed to hold locks serializing us with truncate / punch hole so
1827	 * this is a reliable test.
1828	 */
1829	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1830
1831	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1832
1833	if (xas.xa_index >= max_pgoff) {
1834		ret = VM_FAULT_SIGBUS;
1835		goto out;
1836	}
1837
1838	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1839		goto fallback;
1840
1841	/*
1842	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1843	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1844	 * entry is already in the array, for instance), it will return
1845	 * VM_FAULT_FALLBACK.
1846	 */
1847	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1848	if (xa_is_internal(entry)) {
1849		ret = xa_to_internal(entry);
1850		goto fallback;
1851	}
1852
1853	/*
1854	 * It is possible, particularly with mixed reads & writes to private
1855	 * mappings, that we have raced with a PTE fault that overlaps with
1856	 * the PMD we need to set up.  If so just return and the fault will be
1857	 * retried.
1858	 */
1859	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1860			!pmd_devmap(*vmf->pmd)) {
1861		ret = 0;
1862		goto unlock_entry;
1863	}
1864
1865	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1866	while (iomap_iter(&iter, ops) > 0) {
1867		if (iomap_length(&iter) < PMD_SIZE)
1868			continue; /* actually breaks out of the loop */
1869
1870		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1871		if (ret != VM_FAULT_FALLBACK)
1872			iter.processed = PMD_SIZE;
1873	}
1874
1875unlock_entry:
1876	dax_unlock_entry(&xas, entry);
1877fallback:
1878	if (ret == VM_FAULT_FALLBACK) {
1879		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1880		count_vm_event(THP_FAULT_FALLBACK);
1881	}
1882out:
1883	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1884	return ret;
1885}
1886#else
1887static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1888			       const struct iomap_ops *ops)
1889{
1890	return VM_FAULT_FALLBACK;
1891}
1892#endif /* CONFIG_FS_DAX_PMD */
1893
1894/**
1895 * dax_iomap_fault - handle a page fault on a DAX file
1896 * @vmf: The description of the fault
1897 * @order: Order of the page to fault in
1898 * @pfnp: PFN to insert for synchronous faults if fsync is required
1899 * @iomap_errp: Storage for detailed error code in case of error
1900 * @ops: Iomap ops passed from the file system
1901 *
1902 * When a page fault occurs, filesystems may call this helper in
1903 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1904 * has done all the necessary locking for page fault to proceed
1905 * successfully.
1906 */
1907vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
1908		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1909{
1910	if (order == 0)
1911		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1912	else if (order == PMD_ORDER)
1913		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1914	else
1915		return VM_FAULT_FALLBACK;
1916}
1917EXPORT_SYMBOL_GPL(dax_iomap_fault);
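/*
 * Illustrative sketch (not part of this file): a filesystem's DAX fault
 * handler typically wraps dax_iomap_fault() roughly as below, where
 * example_iomap_ops is a placeholder for the filesystem's own iomap_ops and
 * the filesystem's locking around the call is omitted:
 *
 *	static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
 *						 unsigned int order)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, order, &pfn, NULL,
 *				      &example_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, order, pfn);
 *		return ret;
 *	}
 *
 * VM_FAULT_NEEDDSYNC is only returned for synchronous (MAP_SYNC) write
 * faults; dax_finish_sync_fault() below persists the affected range and then
 * installs the writeable page table entry.
 */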
1918
1919/*
1920 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1921 * @vmf: The description of the fault
1922 * @pfn: PFN to insert
1923 * @order: Order of entry to insert.
1924 *
1925 * This function inserts a writeable PTE or PMD entry into the page tables
1926 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1927 */
1928static vm_fault_t
1929dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1930{
1931	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1932	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1933	void *entry;
1934	vm_fault_t ret;
1935
1936	xas_lock_irq(&xas);
1937	entry = get_unlocked_entry(&xas, order);
1938	/* Did we race with someone splitting entry or so? */
1939	if (!entry || dax_is_conflict(entry) ||
1940	    (order == 0 && !dax_is_pte_entry(entry))) {
1941		put_unlocked_entry(&xas, entry, WAKE_NEXT);
1942		xas_unlock_irq(&xas);
1943		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1944						      VM_FAULT_NOPAGE);
1945		return VM_FAULT_NOPAGE;
1946	}
1947	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1948	dax_lock_entry(&xas, entry);
1949	xas_unlock_irq(&xas);
1950	if (order == 0)
1951		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1952#ifdef CONFIG_FS_DAX_PMD
1953	else if (order == PMD_ORDER)
1954		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1955#endif
1956	else
1957		ret = VM_FAULT_FALLBACK;
1958	dax_unlock_entry(&xas, entry);
1959	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1960	return ret;
1961}
1962
1963/**
1964 * dax_finish_sync_fault - finish synchronous page fault
1965 * @vmf: The description of the fault
1966 * @order: Order of entry to be inserted
1967 * @pfn: PFN to insert
1968 *
1969 * This function ensures that the file range touched by the page fault is
1970 * stored persistently on the media and then inserts the appropriate page
1971 * table entry.
1972 */
1973vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
1974		pfn_t pfn)
1975{
1976	int err;
1977	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1978	size_t len = PAGE_SIZE << order;
1979
1980	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1981	if (err)
1982		return VM_FAULT_SIGBUS;
1983	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1984}
1985EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
1986
1987static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
1988		struct iomap_iter *it_dest, u64 len, bool *same)
1989{
1990	const struct iomap *smap = &it_src->iomap;
1991	const struct iomap *dmap = &it_dest->iomap;
1992	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
1993	void *saddr, *daddr;
1994	int id, ret;
1995
1996	len = min(len, min(smap->length, dmap->length));
1997
1998	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
1999		*same = true;
2000		return len;
2001	}
2002
2003	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
2004		*same = false;
2005		return 0;
2006	}
2007
2008	id = dax_read_lock();
2009	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
2010				      &saddr, NULL);
2011	if (ret < 0)
2012		goto out_unlock;
2013
2014	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
2015				      &daddr, NULL);
2016	if (ret < 0)
2017		goto out_unlock;
2018
2019	*same = !memcmp(saddr, daddr, len);
2020	if (!*same)
2021		len = 0;
2022	dax_read_unlock(id);
2023	return len;
2024
2025out_unlock:
2026	dax_read_unlock(id);
2027	return -EIO;
2028}
2029
2030int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
2031		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
2032		const struct iomap_ops *ops)
2033{
2034	struct iomap_iter src_iter = {
2035		.inode		= src,
2036		.pos		= srcoff,
2037		.len		= len,
2038		.flags		= IOMAP_DAX,
2039	};
2040	struct iomap_iter dst_iter = {
2041		.inode		= dst,
2042		.pos		= dstoff,
2043		.len		= len,
2044		.flags		= IOMAP_DAX,
2045	};
2046	int ret, compared = 0;
2047
2048	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
2049	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
2050		compared = dax_range_compare_iter(&src_iter, &dst_iter,
2051				min(src_iter.len, dst_iter.len), same);
2052		if (compared < 0)
2053			return ret;
2054		src_iter.processed = dst_iter.processed = compared;
2055	}
2056	return ret;
2057}
2058
2059int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2060			      struct file *file_out, loff_t pos_out,
2061			      loff_t *len, unsigned int remap_flags,
2062			      const struct iomap_ops *ops)
2063{
2064	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
2065					       pos_out, len, remap_flags, ops);
2066}
2067EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
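/*
 * Illustrative sketch (not part of this file): a filesystem supporting
 * reflink/dedupe on DAX inodes would call the helper above from its
 * ->remap_file_range() preparation path, roughly as below, with
 * example_iomap_ops standing in for the filesystem's own read iomap_ops and
 * len being the caller's loff_t pointer:
 *
 *	if (IS_DAX(file_inode(file_in)))
 *		ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, len, remap_flags, &example_iomap_ops);
 *	else
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, len, remap_flags);
 *
 * For FIDEDUPERANGE requests, __generic_remap_file_range_prep() uses the
 * supplied iomap_ops to compare the two ranges through
 * dax_dedupe_file_range_compare() instead of through the page cache.
 */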
v4.17
 
   1/*
   2 * fs/dax.c - Direct Access filesystem code
   3 * Copyright (c) 2013-2014 Intel Corporation
   4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 */
  16
  17#include <linux/atomic.h>
  18#include <linux/blkdev.h>
  19#include <linux/buffer_head.h>
  20#include <linux/dax.h>
  21#include <linux/fs.h>
  22#include <linux/genhd.h>
  23#include <linux/highmem.h>
  24#include <linux/memcontrol.h>
  25#include <linux/mm.h>
  26#include <linux/mutex.h>
  27#include <linux/pagevec.h>
  28#include <linux/sched.h>
  29#include <linux/sched/signal.h>
  30#include <linux/uio.h>
  31#include <linux/vmstat.h>
  32#include <linux/pfn_t.h>
  33#include <linux/sizes.h>
  34#include <linux/mmu_notifier.h>
  35#include <linux/iomap.h>
  36#include "internal.h"
 
  37
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/fs_dax.h>
  40
  41/* We choose 4096 entries - same as per-zone page wait tables */
  42#define DAX_WAIT_TABLE_BITS 12
  43#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  44
  45/* The 'colour' (ie low bits) within a PMD of a page offset.  */
  46#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  47#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  48
  49static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  50
  51static int __init init_dax_wait_table(void)
  52{
  53	int i;
  54
  55	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  56		init_waitqueue_head(wait_table + i);
  57	return 0;
  58}
  59fs_initcall(init_dax_wait_table);
  60
  61/*
  62 * We use lowest available bit in exceptional entry for locking, one bit for
  63 * the entry size (PMD) and two more to tell us if the entry is a zero page or
  64 * an empty entry that is just used for locking.  In total four special bits.
 
  65 *
  66 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  67 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  68 * block allocation.
  69 */
  70#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
  71#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
  72#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
  73#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
  74#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
 
 
 
 
 
  75
  76static unsigned long dax_radix_pfn(void *entry)
  77{
  78	return (unsigned long)entry >> RADIX_DAX_SHIFT;
  79}
  80
  81static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
  82{
  83	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
  84			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
  85}
  86
  87static unsigned int dax_radix_order(void *entry)
  88{
  89	if ((unsigned long)entry & RADIX_DAX_PMD)
  90		return PMD_SHIFT - PAGE_SHIFT;
  91	return 0;
  92}
  93
  94static int dax_is_pmd_entry(void *entry)
  95{
  96	return (unsigned long)entry & RADIX_DAX_PMD;
  97}
  98
  99static int dax_is_pte_entry(void *entry)
 100{
 101	return !((unsigned long)entry & RADIX_DAX_PMD);
 102}
 103
 104static int dax_is_zero_entry(void *entry)
 105{
 106	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
 107}
 108
 109static int dax_is_empty_entry(void *entry)
 110{
 111	return (unsigned long)entry & RADIX_DAX_EMPTY;
 112}
 113
 114/*
 115 * DAX radix tree locking
 
 
 
 
 
 
 
 
 
 116 */
 117struct exceptional_entry_key {
 118	struct address_space *mapping;
 119	pgoff_t entry_start;
 120};
 121
 122struct wait_exceptional_entry_queue {
 123	wait_queue_entry_t wait;
 124	struct exceptional_entry_key key;
 125};
 126
 127static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 128		pgoff_t index, void *entry, struct exceptional_entry_key *key)
 
 
 
 
 
 
 
 
 
 
 129{
 130	unsigned long hash;
 
 131
 132	/*
 133	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 134	 * queue to the start of that PMD.  This ensures that all offsets in
 135	 * the range covered by the PMD map to the same bit lock.
 136	 */
 137	if (dax_is_pmd_entry(entry))
 138		index &= ~PG_PMD_COLOUR;
 139
 140	key->mapping = mapping;
 141	key->entry_start = index;
 142
 143	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
 144	return wait_table + hash;
 145}
 146
 147static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
 148				       int sync, void *keyp)
 149{
 150	struct exceptional_entry_key *key = keyp;
 151	struct wait_exceptional_entry_queue *ewait =
 152		container_of(wait, struct wait_exceptional_entry_queue, wait);
 153
 154	if (key->mapping != ewait->key.mapping ||
 155	    key->entry_start != ewait->key.entry_start)
 156		return 0;
 157	return autoremove_wake_function(wait, mode, sync, NULL);
 158}
 159
 160/*
 161 * @entry may no longer be the entry at the index in the mapping.
 162 * The important information it's conveying is whether the entry at
 163 * this index used to be a PMD entry.
 164 */
 165static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 166		pgoff_t index, void *entry, bool wake_all)
 167{
 168	struct exceptional_entry_key key;
 169	wait_queue_head_t *wq;
 170
 171	wq = dax_entry_waitqueue(mapping, index, entry, &key);
 172
 173	/*
 174	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 175	 * under the i_pages lock, ditto for entry handling in our callers.
 176	 * So at this point all tasks that could have seen our entry locked
 177	 * must be in the waitqueue and the following check will see them.
 178	 */
 179	if (waitqueue_active(wq))
 180		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 181}
 182
 183/*
 184 * Check whether the given slot is locked.  Must be called with the i_pages
 185 * lock held.
 186 */
 187static inline int slot_locked(struct address_space *mapping, void **slot)
 188{
 189	unsigned long entry = (unsigned long)
 190		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 191	return entry & RADIX_DAX_ENTRY_LOCK;
 192}
 193
 194/*
 195 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 196 */
 197static inline void *lock_slot(struct address_space *mapping, void **slot)
 198{
 199	unsigned long entry = (unsigned long)
 200		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 201
 202	entry |= RADIX_DAX_ENTRY_LOCK;
 203	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 204	return (void *)entry;
 205}
 206
 207/*
 208 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 209 */
 210static inline void *unlock_slot(struct address_space *mapping, void **slot)
 211{
 212	unsigned long entry = (unsigned long)
 213		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 214
 215	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
 216	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 217	return (void *)entry;
 218}
 219
 220/*
 221 * Lookup entry in radix tree, wait for it to become unlocked if it is
 222 * exceptional entry and return it. The caller must call
 223 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 224 * put_locked_mapping_entry() when he locked the entry and now wants to
 225 * unlock it.
 226 *
 227 * Must be called with the i_pages lock held.
 228 */
 229static void *get_unlocked_mapping_entry(struct address_space *mapping,
 230					pgoff_t index, void ***slotp)
 231{
 232	void *entry, **slot;
 233	struct wait_exceptional_entry_queue ewait;
 234	wait_queue_head_t *wq;
 235
 236	init_wait(&ewait.wait);
 237	ewait.wait.func = wake_exceptional_entry_func;
 238
 239	for (;;) {
 240		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 241					  &slot);
 242		if (!entry ||
 243		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
 244		    !slot_locked(mapping, slot)) {
 245			if (slotp)
 246				*slotp = slot;
 247			return entry;
 248		}
 249
 250		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 251		prepare_to_wait_exclusive(wq, &ewait.wait,
 252					  TASK_UNINTERRUPTIBLE);
 253		xa_unlock_irq(&mapping->i_pages);
 
 254		schedule();
 255		finish_wait(wq, &ewait.wait);
 256		xa_lock_irq(&mapping->i_pages);
 257	}
 258}
 259
 260static void dax_unlock_mapping_entry(struct address_space *mapping,
 261				     pgoff_t index)
 
 
 
 
 262{
 263	void *entry, **slot;
 
 
 
 
 264
 265	xa_lock_irq(&mapping->i_pages);
 266	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
 267	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 268			 !slot_locked(mapping, slot))) {
 269		xa_unlock_irq(&mapping->i_pages);
 270		return;
 271	}
 272	unlock_slot(mapping, slot);
 273	xa_unlock_irq(&mapping->i_pages);
 274	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 
 275}
 276
 277static void put_locked_mapping_entry(struct address_space *mapping,
 278		pgoff_t index)
 279{
 280	dax_unlock_mapping_entry(mapping, index);
 
 281}
 282
 283/*
 284 * Called when we are done with radix tree entry we looked up via
 285 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 286 */
 287static void put_unlocked_mapping_entry(struct address_space *mapping,
 288				       pgoff_t index, void *entry)
 289{
 290	if (!entry)
 291		return;
 292
 293	/* We have to wake up next waiter for the radix tree entry lock */
 294	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 295}
 296
 297static unsigned long dax_entry_size(void *entry)
 298{
 299	if (dax_is_zero_entry(entry))
 300		return 0;
 301	else if (dax_is_empty_entry(entry))
 302		return 0;
 303	else if (dax_is_pmd_entry(entry))
 304		return PMD_SIZE;
 305	else
 306		return PAGE_SIZE;
 307}
 308
 309static unsigned long dax_radix_end_pfn(void *entry)
 310{
 311	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 312}
 313
 314/*
 315 * Iterate through all mapped pfns represented by an entry, i.e. skip
 316 * 'empty' and 'zero' entries.
 317 */
 318#define for_each_mapped_pfn(entry, pfn) \
 319	for (pfn = dax_radix_pfn(entry); \
 320			pfn < dax_radix_end_pfn(entry); pfn++)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 321
 322static void dax_associate_entry(void *entry, struct address_space *mapping)
 323{
 324	unsigned long pfn;
 
 
 
 
 
 
 
 
 
 
 
 
 325
 326	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 327		return;
 328
 
 329	for_each_mapped_pfn(entry, pfn) {
 330		struct page *page = pfn_to_page(pfn);
 331
 332		WARN_ON_ONCE(page->mapping);
 333		page->mapping = mapping;
 
 
 
 
 
 334	}
 335}
 336
 337static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 338		bool trunc)
 339{
 340	unsigned long pfn;
 341
 342	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 343		return;
 344
 345	for_each_mapped_pfn(entry, pfn) {
 346		struct page *page = pfn_to_page(pfn);
 347
 348		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 349		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 
 
 
 
 
 350		page->mapping = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 351	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 352}
 353
 354/*
 355 * Find radix tree entry at given index. If it points to an exceptional entry,
 356 * return it with the radix tree entry locked. If the radix tree doesn't
 357 * contain given index, create an empty exceptional entry for the index and
 358 * return with it locked.
 359 *
 360 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 361 * either return that locked entry or will return an error.  This error will
 362 * happen if there are any 4k entries within the 2MiB range that we are
 363 * requesting.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 364 *
 365 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 366 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 367 * insertion will fail if it finds any 4k entries already in the tree, and a
 368 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 369 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 370 * well as 2MiB empty entries.
 371 *
 372 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 373 * real storage backing them.  We will leave these real 2MiB DAX entries in
 374 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 375 *
 376 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 377 * persistent memory the benefit is doubtful. We can add that later if we can
 378 * show it helps.
 
 
 
 
 379 */
 380static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 381		unsigned long size_flag)
 382{
 383	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
 384	void *entry, **slot;
 
 385
 386restart:
 387	xa_lock_irq(&mapping->i_pages);
 388	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 
 389
 390	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
 391		entry = ERR_PTR(-EIO);
 392		goto out_unlock;
 393	}
 
 
 
 394
 395	if (entry) {
 396		if (size_flag & RADIX_DAX_PMD) {
 397			if (dax_is_pte_entry(entry)) {
 398				put_unlocked_mapping_entry(mapping, index,
 399						entry);
 400				entry = ERR_PTR(-EEXIST);
 401				goto out_unlock;
 402			}
 403		} else { /* trying to grab a PTE entry */
 404			if (dax_is_pmd_entry(entry) &&
 405			    (dax_is_zero_entry(entry) ||
 406			     dax_is_empty_entry(entry))) {
 407				pmd_downgrade = true;
 408			}
 409		}
 410	}
 411
 412	/* No entry for given index? Make sure radix tree is big enough. */
 413	if (!entry || pmd_downgrade) {
 414		int err;
 
 
 
 415
 416		if (pmd_downgrade) {
 417			/*
 418			 * Make sure 'entry' remains valid while we drop
 419			 * the i_pages lock.
 420			 */
 421			entry = lock_slot(mapping, slot);
 422		}
 423
 424		xa_unlock_irq(&mapping->i_pages);
 425		/*
 426		 * Besides huge zero pages the only other thing that gets
 427		 * downgraded are empty entries which don't need to be
 428		 * unmapped.
 429		 */
 430		if (pmd_downgrade && dax_is_zero_entry(entry))
 431			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 432							PG_PMD_NR, false);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 433
 434		err = radix_tree_preload(
 435				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
 436		if (err) {
 437			if (pmd_downgrade)
 438				put_locked_mapping_entry(mapping, index);
 439			return ERR_PTR(err);
 440		}
 441		xa_lock_irq(&mapping->i_pages);
 442
 443		if (!entry) {
 444			/*
 445			 * We needed to drop the i_pages lock while calling
 446			 * radix_tree_preload() and we didn't have an entry to
 447			 * lock.  See if another thread inserted an entry at
 448			 * our index during this time.
 449			 */
 450			entry = __radix_tree_lookup(&mapping->i_pages, index,
 451					NULL, &slot);
 452			if (entry) {
 453				radix_tree_preload_end();
 454				xa_unlock_irq(&mapping->i_pages);
 455				goto restart;
 456			}
 457		}
 458
 459		if (pmd_downgrade) {
 460			dax_disassociate_entry(entry, mapping, false);
 461			radix_tree_delete(&mapping->i_pages, index);
 462			mapping->nrexceptional--;
 463			dax_wake_mapping_entry_waiter(mapping, index, entry,
 464					true);
 465		}
 
 
 
 
 
 
 
 
 
 
 
 466
 467		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
 
 
 
 
 
 
 
 
 
 
 
 
 468
 469		err = __radix_tree_insert(&mapping->i_pages, index,
 470				dax_radix_order(entry), entry);
 471		radix_tree_preload_end();
 472		if (err) {
 473			xa_unlock_irq(&mapping->i_pages);
 474			/*
 475			 * Our insertion of a DAX entry failed, most likely
 476			 * because we were inserting a PMD entry and it
 477			 * collided with a PTE sized entry at a different
 478			 * index in the PMD range.  We haven't inserted
 479			 * anything into the radix tree and have no waiters to
 480			 * wake.
 481			 */
 482			return ERR_PTR(err);
 483		}
 484		/* Good, we have inserted empty locked entry into the tree. */
 485		mapping->nrexceptional++;
 486		xa_unlock_irq(&mapping->i_pages);
 487		return entry;
 488	}
 489	entry = lock_slot(mapping, slot);
 490 out_unlock:
 491	xa_unlock_irq(&mapping->i_pages);
 492	return entry;
 493}
 
 494
 495static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 496					  pgoff_t index, bool trunc)
 497{
 
 498	int ret = 0;
 499	void *entry;
 500	struct radix_tree_root *pages = &mapping->i_pages;
 501
 502	xa_lock_irq(pages);
 503	entry = get_unlocked_mapping_entry(mapping, index, NULL);
 504	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
 505		goto out;
 506	if (!trunc &&
 507	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
 508	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
 509		goto out;
 510	dax_disassociate_entry(entry, mapping, trunc);
 511	radix_tree_delete(pages, index);
 512	mapping->nrexceptional--;
 513	ret = 1;
 514out:
 515	put_unlocked_mapping_entry(mapping, index, entry);
 516	xa_unlock_irq(pages);
 517	return ret;
 518}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 519/*
 520 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 521 * entry to get unlocked before deleting it.
 522 */
 523int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 524{
 525	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 526
 527	/*
 528	 * This gets called from truncate / punch_hole path. As such, the caller
 529	 * must hold locks protecting against concurrent modifications of the
 530	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
 531	 * caller has seen exceptional entry for this index, we better find it
 532	 * at that index as well...
 533	 */
 534	WARN_ON_ONCE(!ret);
 535	return ret;
 536}
 537
 538/*
 539 * Invalidate exceptional DAX entry if it is clean.
 540 */
 541int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 542				      pgoff_t index)
 543{
 544	return __dax_invalidate_mapping_entry(mapping, index, false);
 
 
 
 
 
 545}
 546
 547static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 548		sector_t sector, size_t size, struct page *to,
 549		unsigned long vaddr)
 550{
 
 551	void *vto, *kaddr;
 552	pgoff_t pgoff;
 553	pfn_t pfn;
 554	long rc;
 555	int id;
 556
 557	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
 558	if (rc)
 559		return rc;
 560
 561	id = dax_read_lock();
 562	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
 
 563	if (rc < 0) {
 564		dax_read_unlock(id);
 565		return rc;
 566	}
 567	vto = kmap_atomic(to);
 568	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
 569	kunmap_atomic(vto);
 570	dax_read_unlock(id);
 571	return 0;
 572}
 573
 574/*
 
 
 
 
 
 
 
 
 
 
 
 575 * By this point grab_mapping_entry() has ensured that we have a locked entry
 576 * of the appropriate size so we don't have to worry about downgrading PMDs to
 577 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 578 * already in the tree, we will skip the insertion and just dirty the PMD as
 579 * appropriate.
 580 */
 581static void *dax_insert_mapping_entry(struct address_space *mapping,
 582				      struct vm_fault *vmf,
 583				      void *entry, pfn_t pfn_t,
 584				      unsigned long flags, bool dirty)
 585{
 586	struct radix_tree_root *pages = &mapping->i_pages;
 587	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 588	pgoff_t index = vmf->pgoff;
 589	void *new_entry;
 590
 591	if (dirty)
 592		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 593
 594	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 
 595		/* we are replacing a zero page with block mapping */
 596		if (dax_is_pmd_entry(entry))
 597			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 598							PG_PMD_NR, false);
 599		else /* pte entry */
 600			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 601	}
 602
 603	xa_lock_irq(pages);
 604	new_entry = dax_radix_locked_entry(pfn, flags);
 605	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 
 
 606		dax_disassociate_entry(entry, mapping, false);
 607		dax_associate_entry(new_entry, mapping);
 608	}
 609
 610	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 611		/*
 612		 * Only swap our new entry into the radix tree if the current
 613		 * entry is a zero page or an empty entry.  If a normal PTE or
 614		 * PMD entry is already in the tree, we leave it alone.  This
 615		 * means that if we are trying to insert a PTE and the
 616		 * existing entry is a PMD, we will just leave the PMD in the
 617		 * tree and dirty it if necessary.
 618		 */
 619		struct radix_tree_node *node;
 620		void **slot;
 621		void *ret;
 622
 623		ret = __radix_tree_lookup(pages, index, &node, &slot);
 624		WARN_ON_ONCE(ret != entry);
 625		__radix_tree_replace(pages, node, slot,
 626				     new_entry, NULL);
 627		entry = new_entry;
 
 
 628	}
 629
 630	if (dirty)
 631		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
 
 
 
 632
 633	xa_unlock_irq(pages);
 634	return entry;
 635}
 636
 637static inline unsigned long
 638pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 639{
 640	unsigned long address;
 641
 642	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 643	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 644	return address;
 645}
 646
 647/* Walk all mappings of a given index of a file and writeprotect them */
 648static void dax_mapping_entry_mkclean(struct address_space *mapping,
 649				      pgoff_t index, unsigned long pfn)
 650{
 
 
 651	struct vm_area_struct *vma;
 652	pte_t pte, *ptep = NULL;
 653	pmd_t *pmdp = NULL;
 654	spinlock_t *ptl;
 655
 656	i_mmap_lock_read(mapping);
 657	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 658		unsigned long address, start, end;
 
 
 
 659
 660		cond_resched();
 661
 662		if (!(vma->vm_flags & VM_SHARED))
 663			continue;
 664
 665		address = pgoff_address(index, vma);
 666
 667		/*
 668		 * Note because we provide start/end to follow_pte_pmd it will
 669		 * call mmu_notifier_invalidate_range_start() on our behalf
 670		 * before taking any lock.
 671		 */
 672		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
 673			continue;
 674
 
 
 
 675		/*
 676		 * No need to call mmu_notifier_invalidate_range() as we are
 677		 * downgrading page table protection not changing it to point
 678		 * to a new page.
 679		 *
 680		 * See Documentation/vm/mmu_notifier.txt
 681		 */
 682		if (pmdp) {
 683#ifdef CONFIG_FS_DAX_PMD
 684			pmd_t pmd;
 685
 686			if (pfn != pmd_pfn(*pmdp))
 687				goto unlock_pmd;
 688			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
 689				goto unlock_pmd;
 690
 691			flush_cache_page(vma, address, pfn);
 692			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
 693			pmd = pmd_wrprotect(pmd);
 694			pmd = pmd_mkclean(pmd);
 695			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 696unlock_pmd:
 697#endif
 698			spin_unlock(ptl);
 699		} else {
 700			if (pfn != pte_pfn(*ptep))
 701				goto unlock_pte;
 702			if (!pte_dirty(*ptep) && !pte_write(*ptep))
 703				goto unlock_pte;
 704
 705			flush_cache_page(vma, address, pfn);
 706			pte = ptep_clear_flush(vma, address, ptep);
 707			pte = pte_wrprotect(pte);
 708			pte = pte_mkclean(pte);
 709			set_pte_at(vma->vm_mm, address, ptep, pte);
 710unlock_pte:
 711			pte_unmap_unlock(ptep, ptl);
 712		}
 713
 714		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 
 
 715	}
 716	i_mmap_unlock_read(mapping);
 717}
 718
 719static int dax_writeback_one(struct dax_device *dax_dev,
 720		struct address_space *mapping, pgoff_t index, void *entry)
 721{
 722	struct radix_tree_root *pages = &mapping->i_pages;
 723	void *entry2, **slot;
 724	unsigned long pfn;
 725	long ret = 0;
 726	size_t size;
 727
 728	/*
 729	 * A page got tagged dirty in DAX mapping? Something is seriously
 730	 * wrong.
 731	 */
 732	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
 733		return -EIO;
 734
 735	xa_lock_irq(pages);
 736	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
 737	/* Entry got punched out / reallocated? */
 738	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
 739		goto put_unlocked;
 740	/*
 741	 * Entry got reallocated elsewhere? No need to writeback. We have to
 742	 * compare pfns as we must not bail out due to difference in lockbit
 743	 * or entry type.
 744	 */
 745	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
 746		goto put_unlocked;
 747	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 748				dax_is_zero_entry(entry))) {
 749		ret = -EIO;
 750		goto put_unlocked;
 751	}
 752
 753	/* Another fsync thread may have already written back this entry */
 754	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
 755		goto put_unlocked;
 756	/* Lock the entry to serialize with page faults */
 757	entry = lock_slot(mapping, slot);
 758	/*
 759	 * We can clear the tag now but we have to be careful so that concurrent
 760	 * dax_writeback_one() calls for the same index cannot finish before we
 761	 * actually flush the caches. This is achieved as the calls will look
 762	 * at the entry only under the i_pages lock and once they do that
 763	 * they will see the entry locked and wait for it to unlock.
 764	 */
 765	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
 766	xa_unlock_irq(pages);
 767
 768	/*
 769	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
 770	 * in the middle of a PMD, the 'index' we are given will be aligned to
 771	 * the start index of the PMD, as will the pfn we pull from 'entry'.
 772	 * This allows us to flush for PMD_SIZE and not have to worry about
 773	 * partial PMD writebacks.
 774	 */
 775	pfn = dax_radix_pfn(entry);
 776	size = PAGE_SIZE << dax_radix_order(entry);
 
 
 777
 778	dax_mapping_entry_mkclean(mapping, index, pfn);
 779	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
 
 
 
 
 
 
 
 780	/*
 781	 * After we have flushed the cache, we can clear the dirty tag. There
 782	 * cannot be new dirty data in the pfn after the flush has completed as
 783	 * the pfn mappings are writeprotected and fault waits for mapping
 784	 * entry lock.
 785	 */
 786	xa_lock_irq(pages);
 787	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
 788	xa_unlock_irq(pages);
 789	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 790	put_locked_mapping_entry(mapping, index);
 
 
 791	return ret;
 792
 793 put_unlocked:
 794	put_unlocked_mapping_entry(mapping, index, entry2);
 795	xa_unlock_irq(pages);
 796	return ret;
 797}
 798
 799/*
 800 * Flush the mapping to the persistent domain within the byte range of [start,
 801 * end]. This is required by data integrity operations to ensure file data is
 802 * on persistent storage prior to completion of the operation.
 803 */
 804int dax_writeback_mapping_range(struct address_space *mapping,
 805		struct block_device *bdev, struct writeback_control *wbc)
 806{
 
 807	struct inode *inode = mapping->host;
 808	pgoff_t start_index, end_index;
 809	pgoff_t indices[PAGEVEC_SIZE];
 810	struct dax_device *dax_dev;
 811	struct pagevec pvec;
 812	bool done = false;
 813	int i, ret = 0;
 814
 815	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 816		return -EIO;
 817
 818	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 819		return 0;
 820
 821	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 822	if (!dax_dev)
 823		return -EIO;
 824
 825	start_index = wbc->range_start >> PAGE_SHIFT;
 826	end_index = wbc->range_end >> PAGE_SHIFT;
 827
 828	trace_dax_writeback_range(inode, start_index, end_index);
 829
 830	tag_pages_for_writeback(mapping, start_index, end_index);
 831
 832	pagevec_init(&pvec);
 833	while (!done) {
 834		pvec.nr = find_get_entries_tag(mapping, start_index,
 835				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
 836				pvec.pages, indices);
 837
 838		if (pvec.nr == 0)
 
 
 
 
 839			break;
 
 
 
 840
 841		for (i = 0; i < pvec.nr; i++) {
 842			if (indices[i] > end_index) {
 843				done = true;
 844				break;
 845			}
 846
 847			ret = dax_writeback_one(dax_dev, mapping, indices[i],
 848					pvec.pages[i]);
 849			if (ret < 0) {
 850				mapping_set_error(mapping, ret);
 851				goto out;
 852			}
 853		}
 854		start_index = indices[pvec.nr - 1] + 1;
 855	}
 856out:
 857	put_dax(dax_dev);
 858	trace_dax_writeback_range_done(inode, start_index, end_index);
 859	return (ret < 0 ? ret : 0);
 860}
 861EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 862
 863static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 
 864{
 865	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 866}
 867
 868static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 869			 pfn_t *pfnp)
 870{
 871	const sector_t sector = dax_iomap_sector(iomap, pos);
 872	pgoff_t pgoff;
 873	void *kaddr;
 874	int id, rc;
 875	long length;
 876
 877	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 878	if (rc)
 879		return rc;
 880	id = dax_read_lock();
 881	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 882				   &kaddr, pfnp);
 883	if (length < 0) {
 884		rc = length;
 885		goto out;
 886	}
 
 
 887	rc = -EINVAL;
 888	if (PFN_PHYS(length) < size)
 889		goto out;
 890	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
 891		goto out;
 892	/* For larger pages we need devmap */
 893	if (length > 1 && !pfn_t_devmap(*pfnp))
 894		goto out;
 895	rc = 0;
 
 
 
 
 
 
 896out:
 897	dax_read_unlock(id);
 898	return rc;
 899}
 900
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 901/*
 902 * The user has performed a load from a hole in the file.  Allocating a new
 903 * page in the file would cause excessive storage usage for workloads with
 904 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 905 * If this page is ever written to we will re-fault and change the mapping to
 906 * point to real DAX storage instead.
 907 */
 908static int dax_load_hole(struct address_space *mapping, void *entry,
 909			 struct vm_fault *vmf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 910{
 
 
 
 911	struct inode *inode = mapping->host;
 912	unsigned long vaddr = vmf->address;
 913	int ret = VM_FAULT_NOPAGE;
 914	struct page *zero_page;
 915	void *entry2;
 
 916	pfn_t pfn;
 917
 918	zero_page = ZERO_PAGE(0);
 919	if (unlikely(!zero_page)) {
 920		ret = VM_FAULT_OOM;
 921		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 922	}
 923
 924	pfn = page_to_pfn_t(zero_page);
 925	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 926			RADIX_DAX_ZERO_PAGE, false);
 927	if (IS_ERR(entry2)) {
 928		ret = VM_FAULT_SIGBUS;
 929		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 930	}
 931
 932	vm_insert_mixed(vmf->vma, vaddr, pfn);
 933out:
 934	trace_dax_load_hole(inode, vmf, ret);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 935	return ret;
 936}
 
 937
 938static bool dax_range_is_aligned(struct block_device *bdev,
 939				 unsigned int offset, unsigned int length)
 940{
 941	unsigned short sector_size = bdev_logical_block_size(bdev);
 
 
 
 
 
 942
 943	if (!IS_ALIGNED(offset, sector_size))
 944		return false;
 945	if (!IS_ALIGNED(length, sector_size))
 946		return false;
 947
 948	return true;
 
 
 
 
 
 
 949}
 950
 951int __dax_zero_page_range(struct block_device *bdev,
 952		struct dax_device *dax_dev, sector_t sector,
 953		unsigned int offset, unsigned int size)
 954{
 955	if (dax_range_is_aligned(bdev, offset, size)) {
 956		sector_t start_sector = sector + (offset >> 9);
 
 
 
 
 
 
 
 957
 958		return blkdev_issue_zeroout(bdev, start_sector,
 959				size >> 9, GFP_NOFS, 0);
 960	} else {
 961		pgoff_t pgoff;
 962		long rc, id;
 963		void *kaddr;
 964		pfn_t pfn;
 
 965
 966		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 967		if (rc)
 968			return rc;
 
 
 
 969
 970		id = dax_read_lock();
 971		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
 972				&pfn);
 973		if (rc < 0) {
 974			dax_read_unlock(id);
 
 
 
 975			return rc;
 976		}
 977		memset(kaddr + offset, 0, size);
 978		dax_flush(dax_dev, kaddr + offset, size);
 979		dax_read_unlock(id);
 980	}
 981	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 982}
 983EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 984
 985static loff_t
 986dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 987		struct iomap *iomap)
 988{
 989	struct block_device *bdev = iomap->bdev;
 
 
 
 990	struct dax_device *dax_dev = iomap->dax_dev;
 991	struct iov_iter *iter = data;
 992	loff_t end = pos + length, done = 0;
 
 
 993	ssize_t ret = 0;
 
 994	int id;
 995
 996	if (iov_iter_rw(iter) == READ) {
 997		end = min(end, i_size_read(inode));
 998		if (pos >= end)
 999			return 0;
1000
1001		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1002			return iov_iter_zero(min(length, end - pos), iter);
1003	}
1004
1005	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
 
 
 
 
 
1006		return -EIO;
1007
1008	/*
1009	 * Write can allocate block for an area which has a hole page mapped
1010	 * into page tables. We have to tear down these mappings so that data
1011	 * written by write(2) is visible in mmap.
1012	 */
1013	if (iomap->flags & IOMAP_F_NEW) {
1014		invalidate_inode_pages2_range(inode->i_mapping,
 
 
 
 
 
 
 
 
 
 
1015					      pos >> PAGE_SHIFT,
1016					      (end - 1) >> PAGE_SHIFT);
1017	}
1018
1019	id = dax_read_lock();
1020	while (pos < end) {
1021		unsigned offset = pos & (PAGE_SIZE - 1);
1022		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1023		const sector_t sector = dax_iomap_sector(iomap, pos);
1024		ssize_t map_len;
1025		pgoff_t pgoff;
1026		void *kaddr;
1027		pfn_t pfn;
1028
1029		if (fatal_signal_pending(current)) {
1030			ret = -EINTR;
1031			break;
1032		}
1033
1034		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1035		if (ret)
1036			break;
1037
1038		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1039				&kaddr, &pfn);
 
 
 
 
 
 
 
1040		if (map_len < 0) {
1041			ret = map_len;
1042			break;
1043		}
1044
 
 
 
 
 
 
 
1045		map_len = PFN_PHYS(map_len);
1046		kaddr += offset;
1047		map_len -= offset;
1048		if (map_len > end - pos)
1049			map_len = end - pos;
1050
1051		/*
1052		 * The userspace address for the memory copy has already been
1053		 * validated via access_ok() in either vfs_read() or
1054		 * vfs_write(), depending on which operation we are doing.
1055		 */
1056		if (iov_iter_rw(iter) == WRITE)
1057			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1058					map_len, iter);
1059		else
1060			map_len = copy_to_iter(kaddr, map_len, iter);
1061		if (map_len <= 0) {
1062			ret = map_len ? map_len : -EFAULT;
 
 
 
 
 
 
 
1063			break;
1064		}
1065
1066		pos += map_len;
1067		length -= map_len;
1068		done += map_len;
1069	}
1070	dax_read_unlock(id);
1071
1072	return done ? done : ret;
1073}
1074
1075/**
1076 * dax_iomap_rw - Perform I/O to a DAX file
1077 * @iocb:	The control block for this I/O
1078 * @iter:	The addresses to do I/O from or to
1079 * @ops:	iomap ops passed from the file system
1080 *
1081 * This function performs read and write operations to directly mapped
1082 * persistent memory.  The callers needs to take care of read/write exclusion
1083 * and evicting any page cache pages in the region under I/O.
1084 */
1085ssize_t
1086dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1087		const struct iomap_ops *ops)
1088{
1089	struct address_space *mapping = iocb->ki_filp->f_mapping;
1090	struct inode *inode = mapping->host;
1091	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1092	unsigned flags = 0;
 
 
 
 
 
 
 
1093
1094	if (iov_iter_rw(iter) == WRITE) {
1095		lockdep_assert_held_exclusive(&inode->i_rwsem);
1096		flags |= IOMAP_WRITE;
1097	} else {
1098		lockdep_assert_held(&inode->i_rwsem);
1099	}
1100
1101	while (iov_iter_count(iter)) {
1102		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1103				iter, dax_iomap_actor);
1104		if (ret <= 0)
1105			break;
1106		pos += ret;
1107		done += ret;
1108	}
1109
1110	iocb->ki_pos += done;
 
1111	return done ? done : ret;
1112}
1113EXPORT_SYMBOL_GPL(dax_iomap_rw);
1114
1115static int dax_fault_return(int error)
1116{
1117	if (error == 0)
1118		return VM_FAULT_NOPAGE;
1119	if (error == -ENOMEM)
1120		return VM_FAULT_OOM;
1121	return VM_FAULT_SIGBUS;
1122}
1123
1124/*
1125 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1126 * flushed on write-faults (non-cow), but not read-faults.
 
 
1127 */
1128static bool dax_fault_is_synchronous(unsigned long flags,
1129		struct vm_area_struct *vma, struct iomap *iomap)
1130{
1131	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1132		&& (iomap->flags & IOMAP_F_DIRTY);
 
 
1133}
1134
1135static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1136			       int *iomap_errp, const struct iomap_ops *ops)
1137{
1138	struct vm_area_struct *vma = vmf->vma;
1139	struct address_space *mapping = vma->vm_file->f_mapping;
1140	struct inode *inode = mapping->host;
1141	unsigned long vaddr = vmf->address;
1142	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1143	struct iomap iomap = { 0 };
1144	unsigned flags = IOMAP_FAULT;
1145	int error, major = 0;
1146	bool write = vmf->flags & FAULT_FLAG_WRITE;
1147	bool sync;
1148	int vmf_ret = 0;
1149	void *entry;
1150	pfn_t pfn;
1151
1152	trace_dax_pte_fault(inode, vmf, vmf_ret);
1153	/*
1154	 * Check whether offset isn't beyond end of file now. Caller is supposed
1155	 * to hold locks serializing us with truncate / punch hole so this is
1156	 * a reliable test.
1157	 */
1158	if (pos >= i_size_read(inode)) {
1159		vmf_ret = VM_FAULT_SIGBUS;
1160		goto out;
1161	}
1162
1163	if (write && !vmf->cow_page)
1164		flags |= IOMAP_WRITE;
1165
1166	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1167	if (IS_ERR(entry)) {
1168		vmf_ret = dax_fault_return(PTR_ERR(entry));
1169		goto out;
1170	}
1171
1172	/*
1173	 * It is possible, particularly with mixed reads & writes to private
1174	 * mappings, that we have raced with a PMD fault that overlaps with
1175	 * the PTE we need to set up.  If so just return and the fault will be
1176	 * retried.
1177	 */
1178	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1179		vmf_ret = VM_FAULT_NOPAGE;
1180		goto unlock_entry;
1181	}
1182
1183	/*
1184	 * Note that we don't bother to use iomap_apply here: DAX required
1185	 * the file system block size to be equal the page size, which means
1186	 * that we never have to deal with more than a single extent here.
1187	 */
1188	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1189	if (iomap_errp)
1190		*iomap_errp = error;
1191	if (error) {
1192		vmf_ret = dax_fault_return(error);
1193		goto unlock_entry;
1194	}
1195	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1196		error = -EIO;	/* fs corruption? */
1197		goto error_finish_iomap;
1198	}
1199
1200	if (vmf->cow_page) {
1201		sector_t sector = dax_iomap_sector(&iomap, pos);
1202
1203		switch (iomap.type) {
1204		case IOMAP_HOLE:
1205		case IOMAP_UNWRITTEN:
1206			clear_user_highpage(vmf->cow_page, vaddr);
1207			break;
1208		case IOMAP_MAPPED:
1209			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1210					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1211			break;
1212		default:
1213			WARN_ON_ONCE(1);
1214			error = -EIO;
1215			break;
1216		}
1217
1218		if (error)
1219			goto error_finish_iomap;
1220
1221		__SetPageUptodate(vmf->cow_page);
1222		vmf_ret = finish_fault(vmf);
1223		if (!vmf_ret)
1224			vmf_ret = VM_FAULT_DONE_COW;
1225		goto finish_iomap;
1226	}
1227
1228	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1229
1230	switch (iomap.type) {
1231	case IOMAP_MAPPED:
1232		if (iomap.flags & IOMAP_F_NEW) {
1233			count_vm_event(PGMAJFAULT);
1234			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1235			major = VM_FAULT_MAJOR;
1236		}
1237		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1238		if (error < 0)
1239			goto error_finish_iomap;
1240
1241		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1242						 0, write && !sync);
1243		if (IS_ERR(entry)) {
1244			error = PTR_ERR(entry);
1245			goto error_finish_iomap;
1246		}
1247
1248		/*
1249		 * If we are doing synchronous page fault and inode needs fsync,
1250		 * we can insert PTE into page tables only after that happens.
1251		 * Skip insertion for now and return the pfn so that caller can
1252		 * insert it after fsync is done.
1253		 */
1254		if (sync) {
1255			if (WARN_ON_ONCE(!pfnp)) {
1256				error = -EIO;
1257				goto error_finish_iomap;
1258			}
1259			*pfnp = pfn;
1260			vmf_ret = VM_FAULT_NEEDDSYNC | major;
1261			goto finish_iomap;
1262		}
1263		trace_dax_insert_mapping(inode, vmf, entry);
1264		if (write)
1265			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
1266		else
1267			error = vm_insert_mixed(vma, vaddr, pfn);
1268
1269		/* -EBUSY is fine, somebody else faulted on the same PTE */
1270		if (error == -EBUSY)
1271			error = 0;
1272		break;
1273	case IOMAP_UNWRITTEN:
1274	case IOMAP_HOLE:
1275		if (!write) {
1276			vmf_ret = dax_load_hole(mapping, entry, vmf);
1277			goto finish_iomap;
1278		}
1279		/*FALLTHRU*/
1280	default:
1281		WARN_ON_ONCE(1);
1282		error = -EIO;
1283		break;
1284	}
1285
1286 error_finish_iomap:
1287	vmf_ret = dax_fault_return(error) | major;
1288 finish_iomap:
1289	if (ops->iomap_end) {
1290		int copied = PAGE_SIZE;
1291
1292		if (vmf_ret & VM_FAULT_ERROR)
1293			copied = 0;
1294		/*
1295		 * The fault is done by now and there's no way back (other
1296		 * thread may be already happily using PTE we have installed).
1297		 * Just ignore error from ->iomap_end since we cannot do much
1298		 * with it.
1299		 */
1300		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1301	}
1302 unlock_entry:
1303	put_locked_mapping_entry(mapping, vmf->pgoff);
1304 out:
1305	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
1306	return vmf_ret;
1307}
1308
1309#ifdef CONFIG_FS_DAX_PMD
1310static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1311		void *entry)
1312{
1313	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1314	unsigned long pmd_addr = vmf->address & PMD_MASK;
1315	struct inode *inode = mapping->host;
1316	struct page *zero_page;
1317	void *ret = NULL;
1318	spinlock_t *ptl;
1319	pmd_t pmd_entry;
1320	pfn_t pfn;
1321
1322	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
 
 
 
 
 
 
 
 
1323
1324	if (unlikely(!zero_page))
1325		goto fallback;
 
1326
1327	pfn = page_to_pfn_t(zero_page);
1328	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1329			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1330	if (IS_ERR(ret))
1331		goto fallback;
1332
1333	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1334	if (!pmd_none(*(vmf->pmd))) {
1335		spin_unlock(ptl);
1336		goto fallback;
1337	}
1338
1339	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1340	pmd_entry = pmd_mkhuge(pmd_entry);
1341	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1342	spin_unlock(ptl);
1343	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1344	return VM_FAULT_NOPAGE;
1345
1346fallback:
1347	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1348	return VM_FAULT_FALLBACK;
1349}
1350
1351static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1352			       const struct iomap_ops *ops)
1353{
1354	struct vm_area_struct *vma = vmf->vma;
1355	struct address_space *mapping = vma->vm_file->f_mapping;
1356	unsigned long pmd_addr = vmf->address & PMD_MASK;
1357	bool write = vmf->flags & FAULT_FLAG_WRITE;
1358	bool sync;
1359	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1360	struct inode *inode = mapping->host;
1361	int result = VM_FAULT_FALLBACK;
1362	struct iomap iomap = { 0 };
1363	pgoff_t max_pgoff, pgoff;
1364	void *entry;
1365	loff_t pos;
1366	int error;
1367	pfn_t pfn;
1368
1369	/*
1370	 * Check whether offset isn't beyond end of file now. Caller is
1371	 * supposed to hold locks serializing us with truncate / punch hole so
1372	 * this is a reliable test.
1373	 */
1374	pgoff = linear_page_index(vma, pmd_addr);
1375	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1376
1377	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1378
1379	/*
1380	 * Make sure that the faulting address's PMD offset (color) matches
1381	 * the PMD offset from the start of the file.  This is necessary so
1382	 * that a PMD range in the page table overlaps exactly with a PMD
1383	 * range in the radix tree.
1384	 */
1385	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1386	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1387		goto fallback;
1388
1389	/* Fall back to PTEs if we're going to COW */
1390	if (write && !(vma->vm_flags & VM_SHARED))
1391		goto fallback;
1392
1393	/* If the PMD would extend outside the VMA */
1394	if (pmd_addr < vma->vm_start)
1395		goto fallback;
1396	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1397		goto fallback;
1398
1399	if (pgoff >= max_pgoff) {
1400		result = VM_FAULT_SIGBUS;
1401		goto out;
1402	}
1403
1404	/* If the PMD would extend beyond the file size */
1405	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1406		goto fallback;
1407
1408	/*
1409	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1410	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1411	 * is already in the tree, for instance), it will return -EEXIST and
1412	 * we just fall back to 4k entries.
1413	 */
1414	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1415	if (IS_ERR(entry))
 
1416		goto fallback;
 
1417
1418	/*
1419	 * It is possible, particularly with mixed reads & writes to private
1420	 * mappings, that we have raced with a PTE fault that overlaps with
1421	 * the PMD we need to set up.  If so just return and the fault will be
1422	 * retried.
1423	 */
1424	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1425			!pmd_devmap(*vmf->pmd)) {
1426		result = 0;
1427		goto unlock_entry;
1428	}
1429
1430	/*
1431	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1432	 * setting up a mapping, so really we're using iomap_begin() as a way
1433	 * to look up our filesystem block.
1434	 */
1435	pos = (loff_t)pgoff << PAGE_SHIFT;
1436	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1437	if (error)
1438		goto unlock_entry;
1439
1440	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1441		goto finish_iomap;
1442
1443	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1444
1445	switch (iomap.type) {
1446	case IOMAP_MAPPED:
1447		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1448		if (error < 0)
1449			goto finish_iomap;
1450
1451		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1452						RADIX_DAX_PMD, write && !sync);
1453		if (IS_ERR(entry))
1454			goto finish_iomap;
1455
1456		/*
1457		 * If we are doing a synchronous page fault and the inode needs
1458		 * fsync, we can insert the PMD into the page tables only after
1459		 * that happens.  Skip insertion for now and return the pfn so
1460		 * that the caller can insert it after fsync is done.
1461		 */
1462		if (sync) {
1463			if (WARN_ON_ONCE(!pfnp))
1464				goto finish_iomap;
1465			*pfnp = pfn;
1466			result = VM_FAULT_NEEDDSYNC;
1467			goto finish_iomap;
1468		}
1469
1470		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1471		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1472					    write);
1473		break;
1474	case IOMAP_UNWRITTEN:
1475	case IOMAP_HOLE:
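		/*
		 * A hole or unwritten extent is only expected for read
		 * faults, which get the zero page; a write fault here means
		 * the filesystem did not allocate blocks in ->iomap_begin.
		 */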
1476		if (WARN_ON_ONCE(write))
1477			break;
1478		result = dax_pmd_load_hole(vmf, &iomap, entry);
1479		break;
1480	default:
1481		WARN_ON_ONCE(1);
1482		break;
1483	}
1484
1485 finish_iomap:
1486	if (ops->iomap_end) {
1487		int copied = PMD_SIZE;
1488
1489		if (result == VM_FAULT_FALLBACK)
1490			copied = 0;
1491		/*
1492		 * The fault is done by now and there's no way back (another
1493		 * thread may already be happily using the PMD we have
1494		 * installed).  Just ignore any error from ->iomap_end since we
1495		 * cannot do much with it.
1496		 */
1497		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1498				&iomap);
1499	}
1500 unlock_entry:
1501	put_locked_mapping_entry(mapping, pgoff);
1502 fallback:
1503	if (result == VM_FAULT_FALLBACK) {
1504		split_huge_pmd(vma, vmf->pmd, vmf->address);
1505		count_vm_event(THP_FAULT_FALLBACK);
1506	}
1507out:
1508	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1509	return result;
1510}
1511#else
1512static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1513			       const struct iomap_ops *ops)
1514{
1515	return VM_FAULT_FALLBACK;
1516}
1517#endif /* CONFIG_FS_DAX_PMD */
1518
1519/**
1520 * dax_iomap_fault - handle a page fault on a DAX file
1521 * @vmf: The description of the fault
1522 * @pe_size: Size of the page to fault in
1523 * @pfnp: PFN to insert for synchronous faults if fsync is required
1524 * @iomap_errp: Storage for detailed error code in case of error
1525 * @ops: Iomap ops passed from the file system
1526 *
1527 * When a page fault occurs, filesystems may call this helper in
1528 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1529 * has done all the necessary locking for the page fault to proceed
1530 * successfully.
1531 */
1532int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1533		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1534{
1535	switch (pe_size) {
1536	case PE_SIZE_PTE:
1537		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1538	case PE_SIZE_PMD:
1539		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1540	default:
1541		return VM_FAULT_FALLBACK;
1542	}
1543}
1544EXPORT_SYMBOL_GPL(dax_iomap_fault);
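
/*
 * Minimal usage sketch (hypothetical, not part of the kernel sources): a
 * filesystem's ->fault / ->huge_fault handler takes its own locks against
 * truncate / punch hole and then hands the fault to dax_iomap_fault()
 * together with its iomap ops.  The names my_dax_huge_fault and
 * my_iomap_ops below are placeholders.
 *
 *	static int my_dax_huge_fault(struct vm_fault *vmf,
 *				     enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		return dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				       &my_iomap_ops);
 *	}
 *
 * See the sketch after dax_finish_sync_fault() below for how such a
 * handler completes a synchronous (VM_FAULT_NEEDDSYNC) fault.
 */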
1545
1546/**
1547 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1548 * @vmf: The description of the fault
1549 * @pe_size: Size of entry to be inserted
1550 * @pfn: PFN to insert
1551 *
1552 * This function inserts a writeable PTE or PMD entry into the page tables
1553 * for an mmapped DAX file.  It also takes care of marking the corresponding
1554 * radix tree entry as dirty.
1555 */
1556static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1557				  enum page_entry_size pe_size,
1558				  pfn_t pfn)
1559{
1560	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1561	void *entry, **slot;
1562	pgoff_t index = vmf->pgoff;
1563	int vmf_ret, error;
1564
1565	xa_lock_irq(&mapping->i_pages);
1566	entry = get_unlocked_mapping_entry(mapping, index, &slot);
1567	/* Did we race with someone splitting entry or so? */
1568	if (!entry ||
1569	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1570	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1571		put_unlocked_mapping_entry(mapping, index, entry);
1572		xa_unlock_irq(&mapping->i_pages);
1573		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1574						      VM_FAULT_NOPAGE);
1575		return VM_FAULT_NOPAGE;
1576	}
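	/*
	 * Tag the entry dirty so that it gets flushed on a later fsync /
	 * writeback, and lock it before touching the page tables.
	 */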
1577	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1578	entry = lock_slot(mapping, slot);
1579	xa_unlock_irq(&mapping->i_pages);
1580	switch (pe_size) {
1581	case PE_SIZE_PTE:
1582		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1583		vmf_ret = dax_fault_return(error);
1584		break;
1585#ifdef CONFIG_FS_DAX_PMD
1586	case PE_SIZE_PMD:
1587		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1588			pfn, true);
1589		break;
1590#endif
1591	default:
1592		vmf_ret = VM_FAULT_FALLBACK;
1593	}
1594	put_locked_mapping_entry(mapping, index);
1595	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
1596	return vmf_ret;
1597}
1598
1599/**
1600 * dax_finish_sync_fault - finish synchronous page fault
1601 * @vmf: The description of the fault
1602 * @pe_size: Size of entry to be inserted
1603 * @pfn: PFN to insert
1604 *
1605 * This function ensures that the file range touched by the page fault is
1606 * stored persistently on the media and then inserts the appropriate page
1607 * table entry.
1608 */
1609int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1610			  pfn_t pfn)
1611{
1612	int err;
1613	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1614	size_t len = 0;
1615
1616	if (pe_size == PE_SIZE_PTE)
1617		len = PAGE_SIZE;
1618	else if (pe_size == PE_SIZE_PMD)
1619		len = PMD_SIZE;
1620	else
1621		WARN_ON_ONCE(1);
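	/* Make the faulted range durable before inserting the PTE/PMD */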
1622	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1623	if (err)
1624		return VM_FAULT_SIGBUS;
1625	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1626}
1627EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
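
/*
 * Minimal sketch (hypothetical, not part of the kernel sources) of the
 * synchronous-fault completion path documented above: when
 * dax_iomap_fault() returns VM_FAULT_NEEDDSYNC it has stored the pfn in
 * *pfnp instead of touching the page tables, and the caller is expected
 * to call dax_finish_sync_fault(), which fsyncs the faulted range and
 * only then inserts the PTE/PMD.  my_iomap_ops is a placeholder name.
 *
 *	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);
 *	if (result & VM_FAULT_NEEDDSYNC)
 *		result = dax_finish_sync_fault(vmf, pe_size, pfn);
 *	return result;
 */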