v4.6
 
   1/*
   2 * fs/dax.c - Direct Access filesystem code
   3 * Copyright (c) 2013-2014 Intel Corporation
   4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 */
  16
  17#include <linux/atomic.h>
  18#include <linux/blkdev.h>
  19#include <linux/buffer_head.h>
  20#include <linux/dax.h>
  21#include <linux/fs.h>
  22#include <linux/genhd.h>
  23#include <linux/highmem.h>
  24#include <linux/memcontrol.h>
  25#include <linux/mm.h>
  26#include <linux/mutex.h>
  27#include <linux/pagevec.h>
  28#include <linux/pmem.h>
  29#include <linux/sched.h>
  30#include <linux/uio.h>
  31#include <linux/vmstat.h>
  32#include <linux/pfn_t.h>
  33#include <linux/sizes.h>
  34
  35static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
  36{
  37	struct request_queue *q = bdev->bd_queue;
  38	long rc = -EIO;
  39
  40	dax->addr = (void __pmem *) ERR_PTR(-EIO);
  41	if (blk_queue_enter(q, true) != 0)
  42		return rc;
  43
  44	rc = bdev_direct_access(bdev, dax);
  45	if (rc < 0) {
  46		dax->addr = (void __pmem *) ERR_PTR(rc);
  47		blk_queue_exit(q);
  48		return rc;
  49	}
  50	return rc;
  51}
  52
  53static void dax_unmap_atomic(struct block_device *bdev,
  54		const struct blk_dax_ctl *dax)
  55{
  56	if (IS_ERR(dax->addr))
  57		return;
  58	blk_queue_exit(bdev->bd_queue);
  59}
  60
  61struct page *read_dax_sector(struct block_device *bdev, sector_t n)
  62{
  63	struct page *page = alloc_pages(GFP_KERNEL, 0);
  64	struct blk_dax_ctl dax = {
  65		.size = PAGE_SIZE,
  66		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
  67	};
  68	long rc;
  69
  70	if (!page)
  71		return ERR_PTR(-ENOMEM);
  72
  73	rc = dax_map_atomic(bdev, &dax);
  74	if (rc < 0)
  75		return ERR_PTR(rc);
  76	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
  77	dax_unmap_atomic(bdev, &dax);
  78	return page;
  79}
  80
  81/*
   82 * dax_clear_sectors() is called from within transaction context from XFS,
   83 * so the stack from this point on must follow GFP_NOFS semantics for all
   84 * operations.
  85 */
  86int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
  87{
  88	struct blk_dax_ctl dax = {
  89		.sector = _sector,
  90		.size = _size,
  91	};
  92
  93	might_sleep();
  94	do {
  95		long count, sz;
  96
  97		count = dax_map_atomic(bdev, &dax);
  98		if (count < 0)
  99			return count;
 100		sz = min_t(long, count, SZ_128K);
 101		clear_pmem(dax.addr, sz);
 102		dax.size -= sz;
 103		dax.sector += sz / 512;
 104		dax_unmap_atomic(bdev, &dax);
 105		cond_resched();
 106	} while (dax.size);
 107
 108	wmb_pmem();
 109	return 0;
 110}
 111EXPORT_SYMBOL_GPL(dax_clear_sectors);
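
/*
 * Example (illustrative sketch, not part of this file): a filesystem that
 * has just allocated an extent on a DAX-capable device might zero it with
 * dax_clear_sectors() before exposing it to userspace.  The helper name
 * below is hypothetical; the sector and byte-count arguments follow the
 * signature above.
 *
 *	static int example_zero_new_extent(struct inode *inode,
 *			sector_t start_sector, long len_bytes)
 *	{
 *		return dax_clear_sectors(inode->i_sb->s_bdev,
 *					 start_sector, len_bytes);
 *	}
 */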
 112
 113/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
 114static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
 115		loff_t pos, loff_t end)
 116{
 117	loff_t final = end - pos + first; /* The final byte of the buffer */
 118
 119	if (first > 0)
 120		clear_pmem(addr, first);
 121	if (final < size)
 122		clear_pmem(addr + final, size - final);
 123}
 124
 125static bool buffer_written(struct buffer_head *bh)
 126{
 127	return buffer_mapped(bh) && !buffer_unwritten(bh);
 128}
 129
 130/*
 131 * When ext4 encounters a hole, it returns without modifying the buffer_head
 132 * which means that we can't trust b_size.  To cope with this, we set b_state
 133 * to 0 before calling get_block and, if any bit is set, we know we can trust
 134 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 135 * and would save us time calling get_block repeatedly.
 136 */
 137static bool buffer_size_valid(struct buffer_head *bh)
 138{
 139	return bh->b_state != 0;
 140}
 141
 142
 143static sector_t to_sector(const struct buffer_head *bh,
 144		const struct inode *inode)
 145{
 146	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 147
 148	return sector;
 149}
 150
 151static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 152		      loff_t start, loff_t end, get_block_t get_block,
 153		      struct buffer_head *bh)
 154{
 155	loff_t pos = start, max = start, bh_max = start;
 156	bool hole = false, need_wmb = false;
 157	struct block_device *bdev = NULL;
 158	int rw = iov_iter_rw(iter), rc;
 159	long map_len = 0;
 160	struct blk_dax_ctl dax = {
 161		.addr = (void __pmem *) ERR_PTR(-EIO),
 162	};
 163
 164	if (rw == READ)
 165		end = min(end, i_size_read(inode));
 166
 167	while (pos < end) {
 168		size_t len;
 169		if (pos == max) {
 170			unsigned blkbits = inode->i_blkbits;
 171			long page = pos >> PAGE_SHIFT;
 172			sector_t block = page << (PAGE_SHIFT - blkbits);
 173			unsigned first = pos - (block << blkbits);
 174			long size;
 175
 176			if (pos == bh_max) {
 177				bh->b_size = PAGE_ALIGN(end - pos);
 178				bh->b_state = 0;
 179				rc = get_block(inode, block, bh, rw == WRITE);
 180				if (rc)
 181					break;
 182				if (!buffer_size_valid(bh))
 183					bh->b_size = 1 << blkbits;
 184				bh_max = pos - first + bh->b_size;
 185				bdev = bh->b_bdev;
 186			} else {
 187				unsigned done = bh->b_size -
 188						(bh_max - (pos - first));
 189				bh->b_blocknr += done >> blkbits;
 190				bh->b_size -= done;
 191			}
 192
 193			hole = rw == READ && !buffer_written(bh);
 194			if (hole) {
 195				size = bh->b_size - first;
 196			} else {
 197				dax_unmap_atomic(bdev, &dax);
 198				dax.sector = to_sector(bh, inode);
 199				dax.size = bh->b_size;
 200				map_len = dax_map_atomic(bdev, &dax);
 201				if (map_len < 0) {
 202					rc = map_len;
 203					break;
 204				}
 205				if (buffer_unwritten(bh) || buffer_new(bh)) {
 206					dax_new_buf(dax.addr, map_len, first,
 207							pos, end);
 208					need_wmb = true;
 209				}
 210				dax.addr += first;
 211				size = map_len - first;
 212			}
 213			max = min(pos + size, end);
 214		}
 215
 216		if (iov_iter_rw(iter) == WRITE) {
 217			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
 218			need_wmb = true;
 219		} else if (!hole)
 220			len = copy_to_iter((void __force *) dax.addr, max - pos,
 221					iter);
 222		else
 223			len = iov_iter_zero(max - pos, iter);
 224
 225		if (!len) {
 226			rc = -EFAULT;
 227			break;
 228		}
 229
 230		pos += len;
 231		if (!IS_ERR(dax.addr))
 232			dax.addr += len;
 233	}
 234
 235	if (need_wmb)
 236		wmb_pmem();
 237	dax_unmap_atomic(bdev, &dax);
 238
 239	return (pos == start) ? rc : pos - start;
 240}
 241
 242/**
 243 * dax_do_io - Perform I/O to a DAX file
 244 * @iocb: The control block for this I/O
 245 * @inode: The file which the I/O is directed at
 246 * @iter: The addresses to do I/O from or to
 247 * @pos: The file offset where the I/O starts
 248 * @get_block: The filesystem method used to translate file offsets to blocks
 249 * @end_io: A filesystem callback for I/O completion
 250 * @flags: See below
 251 *
 252 * This function uses the same locking scheme as do_blockdev_direct_IO:
 253 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 254 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 255 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 256 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 257 * is in progress.
 258 */
 259ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
 260		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
 261		  dio_iodone_t end_io, int flags)
 262{
 263	struct buffer_head bh;
 264	ssize_t retval = -EINVAL;
 265	loff_t end = pos + iov_iter_count(iter);
 266
 267	memset(&bh, 0, sizeof(bh));
 268	bh.b_bdev = inode->i_sb->s_bdev;
 269
 270	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
 271		struct address_space *mapping = inode->i_mapping;
 272		inode_lock(inode);
 273		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
 274		if (retval) {
 275			inode_unlock(inode);
 276			goto out;
 277		}
 278	}
 279
 280	/* Protects against truncate */
 281	if (!(flags & DIO_SKIP_DIO_COUNT))
 282		inode_dio_begin(inode);
 283
 284	retval = dax_io(inode, iter, pos, end, get_block, &bh);
 285
 286	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
 287		inode_unlock(inode);
 288
 289	if (end_io) {
 290		int err;
 291
 292		err = end_io(iocb, pos, retval, bh.b_private);
 293		if (err)
 294			retval = err;
 295	}
 296
 297	if (!(flags & DIO_SKIP_DIO_COUNT))
 298		inode_dio_end(inode);
 299 out:
 300	return retval;
 301}
 302EXPORT_SYMBOL_GPL(dax_do_io);
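
/*
 * Example (sketch only, assuming a v4.6-era ->direct_IO signature): a
 * filesystem can route DAX inodes through dax_do_io() from its ->direct_IO
 * method.  "example_get_block" is a hypothetical get_block_t helper
 * supplied by the filesystem; non-DAX inodes would fall back to the normal
 * direct I/O path (not shown).
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					example_get_block, NULL, DIO_LOCKING);
 *		return -EINVAL;
 *	}
 */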
 303
 304/*
 305 * The user has performed a load from a hole in the file.  Allocating
 306 * a new page in the file would cause excessive storage usage for
 307 * workloads with sparse files.  We allocate a page cache page instead.
 308 * We'll kick it out of the page cache if it's ever written to,
 309 * otherwise it will simply fall out of the page cache under memory
 310 * pressure without ever having been dirtied.
 311 */
 312static int dax_load_hole(struct address_space *mapping, struct page *page,
 313							struct vm_fault *vmf)
 314{
 315	unsigned long size;
 316	struct inode *inode = mapping->host;
 317	if (!page)
 318		page = find_or_create_page(mapping, vmf->pgoff,
 319						GFP_KERNEL | __GFP_ZERO);
 320	if (!page)
 321		return VM_FAULT_OOM;
 322	/* Recheck i_size under page lock to avoid truncate race */
 323	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 324	if (vmf->pgoff >= size) {
 325		unlock_page(page);
 326		put_page(page);
 327		return VM_FAULT_SIGBUS;
 328	}
 329
 330	vmf->page = page;
 331	return VM_FAULT_LOCKED;
 332}
 333
 334static int copy_user_bh(struct page *to, struct inode *inode,
 335		struct buffer_head *bh, unsigned long vaddr)
 336{
 337	struct blk_dax_ctl dax = {
 338		.sector = to_sector(bh, inode),
 339		.size = bh->b_size,
 340	};
 341	struct block_device *bdev = bh->b_bdev;
 342	void *vto;
 343
 344	if (dax_map_atomic(bdev, &dax) < 0)
 345		return PTR_ERR(dax.addr);
 346	vto = kmap_atomic(to);
 347	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
 348	kunmap_atomic(vto);
 349	dax_unmap_atomic(bdev, &dax);
 350	return 0;
 351}
 352
 353#define NO_SECTOR -1
 354#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 355
 356static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
 357		sector_t sector, bool pmd_entry, bool dirty)
 358{
 359	struct radix_tree_root *page_tree = &mapping->page_tree;
 360	pgoff_t pmd_index = DAX_PMD_INDEX(index);
 361	int type, error = 0;
 362	void *entry;
 363
 364	WARN_ON_ONCE(pmd_entry && !dirty);
 365	if (dirty)
 366		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 367
 368	spin_lock_irq(&mapping->tree_lock);
 369
 370	entry = radix_tree_lookup(page_tree, pmd_index);
 371	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
 372		index = pmd_index;
 373		goto dirty;
 374	}
 375
 376	entry = radix_tree_lookup(page_tree, index);
 377	if (entry) {
 378		type = RADIX_DAX_TYPE(entry);
 379		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
 380					type != RADIX_DAX_PMD)) {
 381			error = -EIO;
 382			goto unlock;
 383		}
 384
 385		if (!pmd_entry || type == RADIX_DAX_PMD)
 386			goto dirty;
 387
 388		/*
 389		 * We only insert dirty PMD entries into the radix tree.  This
 390		 * means we don't need to worry about removing a dirty PTE
 391		 * entry and inserting a clean PMD entry, thus reducing the
 392		 * range we would flush with a follow-up fsync/msync call.
 393		 */
 394		radix_tree_delete(&mapping->page_tree, index);
 395		mapping->nrexceptional--;
 396	}
 397
 398	if (sector == NO_SECTOR) {
 399		/*
 400		 * This can happen during correct operation if our pfn_mkwrite
 401		 * fault raced against a hole punch operation.  If this
 402		 * happens the pte that was hole punched will have been
 403		 * unmapped and the radix tree entry will have been removed by
 404		 * the time we are called, but the call will still happen.  We
 405		 * will return all the way up to wp_pfn_shared(), where the
 406		 * pte_same() check will fail, eventually causing page fault
 407		 * to be retried by the CPU.
 408		 */
 409		goto unlock;
 410	}
 411
 412	error = radix_tree_insert(page_tree, index,
 413			RADIX_DAX_ENTRY(sector, pmd_entry));
 414	if (error)
 415		goto unlock;
 416
 417	mapping->nrexceptional++;
 418 dirty:
 419	if (dirty)
 420		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 421 unlock:
 422	spin_unlock_irq(&mapping->tree_lock);
 423	return error;
 424}
 425
 426static int dax_writeback_one(struct block_device *bdev,
 427		struct address_space *mapping, pgoff_t index, void *entry)
 428{
 429	struct radix_tree_root *page_tree = &mapping->page_tree;
 430	int type = RADIX_DAX_TYPE(entry);
 431	struct radix_tree_node *node;
 432	struct blk_dax_ctl dax;
 433	void **slot;
 434	int ret = 0;
 435
 436	spin_lock_irq(&mapping->tree_lock);
 437	/*
 438	 * Regular page slots are stabilized by the page lock even
 439	 * without the tree itself locked.  These unlocked entries
 440	 * need verification under the tree lock.
 441	 */
 442	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
 443		goto unlock;
 444	if (*slot != entry)
 445		goto unlock;
 446
 447	/* another fsync thread may have already written back this entry */
 448	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
 449		goto unlock;
 450
 451	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
 452		ret = -EIO;
 453		goto unlock;
 454	}
 455
 456	dax.sector = RADIX_DAX_SECTOR(entry);
 457	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
 458	spin_unlock_irq(&mapping->tree_lock);
 459
 460	/*
 461	 * We cannot hold tree_lock while calling dax_map_atomic() because it
 462	 * eventually calls cond_resched().
 463	 */
 464	ret = dax_map_atomic(bdev, &dax);
 465	if (ret < 0)
 466		return ret;
 467
 468	if (WARN_ON_ONCE(ret < dax.size)) {
 469		ret = -EIO;
 470		goto unmap;
 471	}
 472
 473	wb_cache_pmem(dax.addr, dax.size);
 474
 475	spin_lock_irq(&mapping->tree_lock);
 476	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
 477	spin_unlock_irq(&mapping->tree_lock);
 478 unmap:
 479	dax_unmap_atomic(bdev, &dax);
 480	return ret;
 481
 482 unlock:
 483	spin_unlock_irq(&mapping->tree_lock);
 484	return ret;
 485}
 486
 487/*
 488 * Flush the mapping to the persistent domain within the byte range of [start,
 489 * end]. This is required by data integrity operations to ensure file data is
 490 * on persistent storage prior to completion of the operation.
 491 */
 492int dax_writeback_mapping_range(struct address_space *mapping,
 493		struct block_device *bdev, struct writeback_control *wbc)
 494{
 495	struct inode *inode = mapping->host;
 496	pgoff_t start_index, end_index, pmd_index;
 497	pgoff_t indices[PAGEVEC_SIZE];
 498	struct pagevec pvec;
 499	bool done = false;
 500	int i, ret = 0;
 501	void *entry;
 502
 503	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 504		return -EIO;
 505
 506	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 507		return 0;
 508
 509	start_index = wbc->range_start >> PAGE_SHIFT;
 510	end_index = wbc->range_end >> PAGE_SHIFT;
 511	pmd_index = DAX_PMD_INDEX(start_index);
 512
 513	rcu_read_lock();
 514	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
 515	rcu_read_unlock();
 516
 517	/* see if the start of our range is covered by a PMD entry */
 518	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
 519		start_index = pmd_index;
 520
 521	tag_pages_for_writeback(mapping, start_index, end_index);
 522
 523	pagevec_init(&pvec, 0);
 524	while (!done) {
 525		pvec.nr = find_get_entries_tag(mapping, start_index,
 526				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
 527				pvec.pages, indices);
 528
 529		if (pvec.nr == 0)
 530			break;
 531
 532		for (i = 0; i < pvec.nr; i++) {
 533			if (indices[i] > end_index) {
 534				done = true;
 535				break;
 536			}
 537
 538			ret = dax_writeback_one(bdev, mapping, indices[i],
 539					pvec.pages[i]);
 540			if (ret < 0)
 541				return ret;
 542		}
 543	}
 544	wmb_pmem();
 545	return 0;
 546}
 547EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
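
/*
 * Example (sketch only): filesystems typically call this from their
 * ->writepages method when the mapping is DAX, so that fsync()/msync()
 * flush CPU caches for any dirty DAX entries:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct inode *inode = mapping->host;
 *
 *		return dax_writeback_mapping_range(mapping,
 *				inode->i_sb->s_bdev, wbc);
 *	}
 */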
 548
 549static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 550			struct vm_area_struct *vma, struct vm_fault *vmf)
 551{
 552	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 553	struct address_space *mapping = inode->i_mapping;
 554	struct block_device *bdev = bh->b_bdev;
 555	struct blk_dax_ctl dax = {
 556		.sector = to_sector(bh, inode),
 557		.size = bh->b_size,
 558	};
 559	pgoff_t size;
 560	int error;
 561
 562	i_mmap_lock_read(mapping);
 563
 564	/*
 565	 * Check truncate didn't happen while we were allocating a block.
 566	 * If it did, this block may or may not be still allocated to the
 567	 * file.  We can't tell the filesystem to free it because we can't
 568	 * take i_mutex here.  In the worst case, the file still has blocks
 569	 * allocated past the end of the file.
 570	 */
 571	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 572	if (unlikely(vmf->pgoff >= size)) {
 573		error = -EIO;
 574		goto out;
 575	}
 576
 577	if (dax_map_atomic(bdev, &dax) < 0) {
 578		error = PTR_ERR(dax.addr);
 579		goto out;
 580	}
 581
 582	if (buffer_unwritten(bh) || buffer_new(bh)) {
 583		clear_pmem(dax.addr, PAGE_SIZE);
 584		wmb_pmem();
 585	}
 586	dax_unmap_atomic(bdev, &dax);
 587
 588	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
 589			vmf->flags & FAULT_FLAG_WRITE);
 590	if (error)
 591		goto out;
 592
 593	error = vm_insert_mixed(vma, vaddr, dax.pfn);
 594
 595 out:
 596	i_mmap_unlock_read(mapping);
 597
 598	return error;
 599}
 600
 601/**
 602 * __dax_fault - handle a page fault on a DAX file
 603 * @vma: The virtual memory area where the fault occurred
 604 * @vmf: The description of the fault
 605 * @get_block: The filesystem method used to translate file offsets to blocks
 606 * @complete_unwritten: The filesystem method used to convert unwritten blocks
  607 *	to written so the data written to them is exposed.  This is required
  608 *	by write faults for filesystems that will return unwritten extent
  609 *	mappings from @get_block, but it is optional for reads as
  610 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
  611 *	does not support unwritten extents, it should pass NULL.
 612 *
 613 * When a page fault occurs, filesystems may call this helper in their
 614 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 615 * the necessary locking for the page fault to proceed successfully.
 616 */
 617int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 618			get_block_t get_block, dax_iodone_t complete_unwritten)
 619{
 620	struct file *file = vma->vm_file;
 621	struct address_space *mapping = file->f_mapping;
 622	struct inode *inode = mapping->host;
 623	struct page *page;
 624	struct buffer_head bh;
 625	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 626	unsigned blkbits = inode->i_blkbits;
 627	sector_t block;
 628	pgoff_t size;
 629	int error;
 630	int major = 0;
 631
 632	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 633	if (vmf->pgoff >= size)
 634		return VM_FAULT_SIGBUS;
 635
 636	memset(&bh, 0, sizeof(bh));
 637	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
 638	bh.b_bdev = inode->i_sb->s_bdev;
 639	bh.b_size = PAGE_SIZE;
 640
 641 repeat:
 642	page = find_get_page(mapping, vmf->pgoff);
 643	if (page) {
 644		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 645			put_page(page);
 646			return VM_FAULT_RETRY;
 647		}
 648		if (unlikely(page->mapping != mapping)) {
 649			unlock_page(page);
 650			put_page(page);
 651			goto repeat;
 652		}
 653		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 654		if (unlikely(vmf->pgoff >= size)) {
 655			/*
 656			 * We have a struct page covering a hole in the file
 657			 * from a read fault and we've raced with a truncate
 658			 */
 659			error = -EIO;
 660			goto unlock_page;
 661		}
 662	}
 663
 664	error = get_block(inode, block, &bh, 0);
 665	if (!error && (bh.b_size < PAGE_SIZE))
 666		error = -EIO;		/* fs corruption? */
 667	if (error)
 668		goto unlock_page;
 669
 670	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 671		if (vmf->flags & FAULT_FLAG_WRITE) {
 672			error = get_block(inode, block, &bh, 1);
 673			count_vm_event(PGMAJFAULT);
 674			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 675			major = VM_FAULT_MAJOR;
 676			if (!error && (bh.b_size < PAGE_SIZE))
 677				error = -EIO;
 678			if (error)
 679				goto unlock_page;
 680		} else {
 681			return dax_load_hole(mapping, page, vmf);
 682		}
 683	}
 684
 685	if (vmf->cow_page) {
 686		struct page *new_page = vmf->cow_page;
 687		if (buffer_written(&bh))
 688			error = copy_user_bh(new_page, inode, &bh, vaddr);
 689		else
 690			clear_user_highpage(new_page, vaddr);
 691		if (error)
 692			goto unlock_page;
 693		vmf->page = page;
 694		if (!page) {
 695			i_mmap_lock_read(mapping);
 696			/* Check we didn't race with truncate */
 697			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 698								PAGE_SHIFT;
 699			if (vmf->pgoff >= size) {
 700				i_mmap_unlock_read(mapping);
 701				error = -EIO;
 702				goto out;
 703			}
 704		}
 705		return VM_FAULT_LOCKED;
 706	}
 707
 708	/* Check we didn't race with a read fault installing a new page */
 709	if (!page && major)
 710		page = find_lock_page(mapping, vmf->pgoff);
 711
 712	if (page) {
 713		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
 714							PAGE_SIZE, 0);
 715		delete_from_page_cache(page);
 716		unlock_page(page);
 717		put_page(page);
 718		page = NULL;
 719	}
 720
 721	/*
 722	 * If we successfully insert the new mapping over an unwritten extent,
 723	 * we need to ensure we convert the unwritten extent. If there is an
 724	 * error inserting the mapping, the filesystem needs to leave it as
 725	 * unwritten to prevent exposure of the stale underlying data to
 726	 * userspace, but we still need to call the completion function so
 727	 * the private resources on the mapping buffer can be released. We
 728	 * indicate what the callback should do via the uptodate variable, same
 729	 * as for normal BH based IO completions.
 730	 */
 731	error = dax_insert_mapping(inode, &bh, vma, vmf);
 732	if (buffer_unwritten(&bh)) {
 733		if (complete_unwritten)
 734			complete_unwritten(&bh, !error);
 735		else
 736			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 737	}
 738
 739 out:
 740	if (error == -ENOMEM)
 741		return VM_FAULT_OOM | major;
 742	/* -EBUSY is fine, somebody else faulted on the same PTE */
 743	if ((error < 0) && (error != -EBUSY))
 744		return VM_FAULT_SIGBUS | major;
 745	return VM_FAULT_NOPAGE | major;
 746
 747 unlock_page:
 748	if (page) {
 749		unlock_page(page);
 750		put_page(page);
 751	}
 752	goto out;
 753}
 754EXPORT_SYMBOL(__dax_fault);
 755
 756/**
 757 * dax_fault - handle a page fault on a DAX file
 758 * @vma: The virtual memory area where the fault occurred
 759 * @vmf: The description of the fault
 760 * @get_block: The filesystem method used to translate file offsets to blocks
 761 *
 762 * When a page fault occurs, filesystems may call this helper in their
 763 * fault handler for DAX files.
 764 */
 765int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 766	      get_block_t get_block, dax_iodone_t complete_unwritten)
 767{
 768	int result;
 769	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
 770
 771	if (vmf->flags & FAULT_FLAG_WRITE) {
 772		sb_start_pagefault(sb);
 773		file_update_time(vma->vm_file);
 774	}
 775	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
 776	if (vmf->flags & FAULT_FLAG_WRITE)
 777		sb_end_pagefault(sb);
 778
 779	return result;
 780}
 781EXPORT_SYMBOL_GPL(dax_fault);
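
/*
 * Example (sketch only): a filesystem wires dax_fault() up through its
 * vm_operations_struct, supplying its own get_block_t.  "example_get_block"
 * is a hypothetical helper; a filesystem without unwritten extents passes
 * NULL for @complete_unwritten.
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault	= example_dax_fault,
 *	};
 */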
 782
 783#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 784/*
 785 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 786 * more often than one might expect in the below function.
 787 */
 788#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
 789
 790static void __dax_dbg(struct buffer_head *bh, unsigned long address,
 791		const char *reason, const char *fn)
 792{
 793	if (bh) {
 794		char bname[BDEVNAME_SIZE];
 795		bdevname(bh->b_bdev, bname);
 796		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
 797			"length %zd fallback: %s\n", fn, current->comm,
 798			address, bname, bh->b_state, (u64)bh->b_blocknr,
 799			bh->b_size, reason);
 800	} else {
 801		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
 802			current->comm, address, reason);
 803	}
 804}
 805
 806#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
 807
 808int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 809		pmd_t *pmd, unsigned int flags, get_block_t get_block,
 810		dax_iodone_t complete_unwritten)
 811{
 812	struct file *file = vma->vm_file;
 813	struct address_space *mapping = file->f_mapping;
 814	struct inode *inode = mapping->host;
 815	struct buffer_head bh;
 816	unsigned blkbits = inode->i_blkbits;
 817	unsigned long pmd_addr = address & PMD_MASK;
 818	bool write = flags & FAULT_FLAG_WRITE;
 819	struct block_device *bdev;
 820	pgoff_t size, pgoff;
 821	sector_t block;
 822	int error, result = 0;
 823	bool alloc = false;
 824
 825	/* dax pmd mappings require pfn_t_devmap() */
 826	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
 827		return VM_FAULT_FALLBACK;
 828
 829	/* Fall back to PTEs if we're going to COW */
 830	if (write && !(vma->vm_flags & VM_SHARED)) {
 831		split_huge_pmd(vma, pmd, address);
 832		dax_pmd_dbg(NULL, address, "cow write");
 833		return VM_FAULT_FALLBACK;
 834	}
 835	/* If the PMD would extend outside the VMA */
 836	if (pmd_addr < vma->vm_start) {
 837		dax_pmd_dbg(NULL, address, "vma start unaligned");
 838		return VM_FAULT_FALLBACK;
 839	}
 840	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
 841		dax_pmd_dbg(NULL, address, "vma end unaligned");
 842		return VM_FAULT_FALLBACK;
 843	}
 844
 845	pgoff = linear_page_index(vma, pmd_addr);
 846	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 847	if (pgoff >= size)
 848		return VM_FAULT_SIGBUS;
 849	/* If the PMD would cover blocks out of the file */
 850	if ((pgoff | PG_PMD_COLOUR) >= size) {
 851		dax_pmd_dbg(NULL, address,
 852				"offset + huge page size > file size");
 853		return VM_FAULT_FALLBACK;
 854	}
 855
 856	memset(&bh, 0, sizeof(bh));
 857	bh.b_bdev = inode->i_sb->s_bdev;
 858	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 859
 860	bh.b_size = PMD_SIZE;
 861
 862	if (get_block(inode, block, &bh, 0) != 0)
 863		return VM_FAULT_SIGBUS;
 864
 865	if (!buffer_mapped(&bh) && write) {
 866		if (get_block(inode, block, &bh, 1) != 0)
 867			return VM_FAULT_SIGBUS;
 868		alloc = true;
 869	}
 870
 871	bdev = bh.b_bdev;
 872
 873	/*
 874	 * If the filesystem isn't willing to tell us the length of a hole,
 875	 * just fall back to PTEs.  Calling get_block 512 times in a loop
 876	 * would be silly.
 877	 */
 878	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
 879		dax_pmd_dbg(&bh, address, "allocated block too small");
 880		return VM_FAULT_FALLBACK;
 881	}
 882
 883	/*
 884	 * If we allocated new storage, make sure no process has any
 885	 * zero pages covering this hole
 886	 */
 887	if (alloc) {
 888		loff_t lstart = pgoff << PAGE_SHIFT;
 889		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
 890
 891		truncate_pagecache_range(inode, lstart, lend);
 892	}
 893
 894	i_mmap_lock_read(mapping);
 895
 896	/*
 897	 * If a truncate happened while we were allocating blocks, we may
 898	 * leave blocks allocated to the file that are beyond EOF.  We can't
 899	 * take i_mutex here, so just leave them hanging; they'll be freed
 900	 * when the file is deleted.
 901	 */
 902	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 903	if (pgoff >= size) {
 904		result = VM_FAULT_SIGBUS;
 905		goto out;
 906	}
 907	if ((pgoff | PG_PMD_COLOUR) >= size) {
 908		dax_pmd_dbg(&bh, address,
 909				"offset + huge page size > file size");
 910		goto fallback;
 911	}
 912
 913	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
 914		spinlock_t *ptl;
 915		pmd_t entry;
 916		struct page *zero_page = get_huge_zero_page();
 917
 918		if (unlikely(!zero_page)) {
 919			dax_pmd_dbg(&bh, address, "no zero page");
 920			goto fallback;
 921		}
 922
 923		ptl = pmd_lock(vma->vm_mm, pmd);
 924		if (!pmd_none(*pmd)) {
 925			spin_unlock(ptl);
 926			dax_pmd_dbg(&bh, address, "pmd already present");
 927			goto fallback;
 928		}
 929
 930		dev_dbg(part_to_dev(bdev->bd_part),
 931				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
 932				__func__, current->comm, address,
 933				(unsigned long long) to_sector(&bh, inode));
 934
 935		entry = mk_pmd(zero_page, vma->vm_page_prot);
 936		entry = pmd_mkhuge(entry);
 937		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
 938		result = VM_FAULT_NOPAGE;
 939		spin_unlock(ptl);
 940	} else {
 941		struct blk_dax_ctl dax = {
 942			.sector = to_sector(&bh, inode),
 943			.size = PMD_SIZE,
 944		};
 945		long length = dax_map_atomic(bdev, &dax);
 946
 947		if (length < 0) {
 948			result = VM_FAULT_SIGBUS;
 949			goto out;
 950		}
 951		if (length < PMD_SIZE) {
 952			dax_pmd_dbg(&bh, address, "dax-length too small");
 953			dax_unmap_atomic(bdev, &dax);
 954			goto fallback;
 955		}
 956		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
 957			dax_pmd_dbg(&bh, address, "pfn unaligned");
 958			dax_unmap_atomic(bdev, &dax);
 959			goto fallback;
 960		}
 961
 962		if (!pfn_t_devmap(dax.pfn)) {
 963			dax_unmap_atomic(bdev, &dax);
 964			dax_pmd_dbg(&bh, address, "pfn not in memmap");
 965			goto fallback;
 966		}
 967
 968		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
 969			clear_pmem(dax.addr, PMD_SIZE);
 970			wmb_pmem();
 971			count_vm_event(PGMAJFAULT);
 972			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 973			result |= VM_FAULT_MAJOR;
 974		}
 975		dax_unmap_atomic(bdev, &dax);
 976
 977		/*
 978		 * For PTE faults we insert a radix tree entry for reads, and
 979		 * leave it clean.  Then on the first write we dirty the radix
 980		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
 981		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
 982		 * call into get_block() to translate the pgoff to a sector in
 983		 * order to be able to create a new radix tree entry.
 984		 *
 985		 * The PMD path doesn't have an equivalent to
 986		 * dax_pfn_mkwrite(), though, so for a read followed by a
 987		 * write we traverse all the way through __dax_pmd_fault()
 988		 * twice.  This means we can just skip inserting a radix tree
 989		 * entry completely on the initial read and just wait until
 990		 * the write to insert a dirty entry.
 991		 */
 992		if (write) {
 993			error = dax_radix_entry(mapping, pgoff, dax.sector,
 994					true, true);
 995			if (error) {
 996				dax_pmd_dbg(&bh, address,
 997						"PMD radix insertion failed");
 998				goto fallback;
 999			}
1000		}
1001
1002		dev_dbg(part_to_dev(bdev->bd_part),
1003				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
1004				__func__, current->comm, address,
1005				pfn_t_to_pfn(dax.pfn),
1006				(unsigned long long) dax.sector);
1007		result |= vmf_insert_pfn_pmd(vma, address, pmd,
1008				dax.pfn, write);
1009	}
1010
1011 out:
1012	i_mmap_unlock_read(mapping);
1013
1014	if (buffer_unwritten(&bh))
1015		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
1016
1017	return result;
1018
1019 fallback:
1020	count_vm_event(THP_FAULT_FALLBACK);
1021	result = VM_FAULT_FALLBACK;
1022	goto out;
1023}
1024EXPORT_SYMBOL_GPL(__dax_pmd_fault);
1025
1026/**
1027 * dax_pmd_fault - handle a PMD fault on a DAX file
1028 * @vma: The virtual memory area where the fault occurred
1029 * @vmf: The description of the fault
1030 * @get_block: The filesystem method used to translate file offsets to blocks
1031 *
1032 * When a page fault occurs, filesystems may call this helper in their
1033 * pmd_fault handler for DAX files.
1034 */
1035int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1036			pmd_t *pmd, unsigned int flags, get_block_t get_block,
1037			dax_iodone_t complete_unwritten)
1038{
1039	int result;
1040	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
1041
1042	if (flags & FAULT_FLAG_WRITE) {
1043		sb_start_pagefault(sb);
1044		file_update_time(vma->vm_file);
1045	}
1046	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
1047				complete_unwritten);
1048	if (flags & FAULT_FLAG_WRITE)
1049		sb_end_pagefault(sb);
1050
1051	return result;
1052}
1053EXPORT_SYMBOL_GPL(dax_pmd_fault);
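
/*
 * Example (sketch only): the huge-page handler sits next to the PTE fault
 * handler in the same vm_operations_struct.  "example_get_block" is again a
 * hypothetical filesystem helper.
 *
 *	static int example_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *					example_get_block, NULL);
 *	}
 */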
1054#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1055
1056/**
1057 * dax_pfn_mkwrite - handle first write to DAX page
1058 * @vma: The virtual memory area where the fault occurred
1059 * @vmf: The description of the fault
1060 */
1061int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1062{
1063	struct file *file = vma->vm_file;
1064	int error;
1065
1066	/*
1067	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
1068	 * RADIX_DAX_PTE entry already exists in the radix tree from a
1069	 * previous call to __dax_fault().  We just want to look up that PTE
1070	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
1071	 * saves us from having to make a call to get_block() here to look
1072	 * up the sector.
1073	 */
1074	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
1075			true);
1076
1077	if (error == -ENOMEM)
1078		return VM_FAULT_OOM;
1079	if (error)
1080		return VM_FAULT_SIGBUS;
1081	return VM_FAULT_NOPAGE;
1082}
1083EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
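
/*
 * Example (sketch only): filesystems usually wrap dax_pfn_mkwrite() in
 * their ->pfn_mkwrite handler so write faults get freeze protection and a
 * timestamp update, mirroring what dax_fault() does above:
 *
 *	static int example_dax_pfn_mkwrite(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vma->vm_file);
 *		ret = dax_pfn_mkwrite(vma, vmf);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */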
1084
1085/**
1086 * dax_zero_page_range - zero a range within a page of a DAX file
1087 * @inode: The file being truncated
1088 * @from: The file offset that is being truncated to
1089 * @length: The number of bytes to zero
1090 * @get_block: The filesystem method used to translate file offsets to blocks
1091 *
1092 * This function can be called by a filesystem when it is zeroing part of a
1093 * page in a DAX file.  This is intended for hole-punch operations.  If
1094 * you are truncating a file, the helper function dax_truncate_page() may be
1095 * more convenient.
1096 *
1097 * We work in terms of PAGE_SIZE here for commonality with
 1098 * block_truncate_page(), but we could go down to the filesystem block size if it
1099 * took care of disposing of the unnecessary blocks.  Even if the filesystem
1100 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1101 * since the file might be mmapped.
1102 */
1103int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1104							get_block_t get_block)
1105{
1106	struct buffer_head bh;
1107	pgoff_t index = from >> PAGE_SHIFT;
1108	unsigned offset = from & (PAGE_SIZE-1);
1109	int err;
1110
1111	/* Block boundary? Nothing to do */
1112	if (!length)
1113		return 0;
1114	BUG_ON((offset + length) > PAGE_SIZE);
1115
1116	memset(&bh, 0, sizeof(bh));
1117	bh.b_bdev = inode->i_sb->s_bdev;
1118	bh.b_size = PAGE_SIZE;
1119	err = get_block(inode, index, &bh, 0);
1120	if (err < 0)
1121		return err;
1122	if (buffer_written(&bh)) {
1123		struct block_device *bdev = bh.b_bdev;
1124		struct blk_dax_ctl dax = {
1125			.sector = to_sector(&bh, inode),
1126			.size = PAGE_SIZE,
1127		};
1128
1129		if (dax_map_atomic(bdev, &dax) < 0)
1130			return PTR_ERR(dax.addr);
1131		clear_pmem(dax.addr + offset, length);
1132		wmb_pmem();
1133		dax_unmap_atomic(bdev, &dax);
1134	}
1135
1136	return 0;
1137}
1138EXPORT_SYMBOL_GPL(dax_zero_page_range);
1139
1140/**
1141 * dax_truncate_page - handle a partial page being truncated in a DAX file
1142 * @inode: The file being truncated
1143 * @from: The file offset that is being truncated to
1144 * @get_block: The filesystem method used to translate file offsets to blocks
1145 *
1146 * Similar to block_truncate_page(), this function can be called by a
1147 * filesystem when it is truncating a DAX file to handle the partial page.
1148 *
1149 * We work in terms of PAGE_SIZE here for commonality with
 1150 * block_truncate_page(), but we could go down to the filesystem block size if it
1151 * took care of disposing of the unnecessary blocks.  Even if the filesystem
1152 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1153 * since the file might be mmapped.
1154 */
1155int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1156{
1157	unsigned length = PAGE_ALIGN(from) - from;
1158	return dax_zero_page_range(inode, from, length, get_block);
1159}
1160EXPORT_SYMBOL_GPL(dax_truncate_page);
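
/*
 * Example (sketch only): a truncate path zeroes the partial tail page with
 * dax_truncate_page(), while a hole punch would call dax_zero_page_range()
 * directly with an in-page offset and length.  "example_get_block" is a
 * hypothetical filesystem helper.
 *
 *	static int example_dax_truncate_tail(struct inode *inode,
 *			loff_t newsize)
 *	{
 *		if (!IS_DAX(inode))
 *			return 0;
 *		return dax_truncate_page(inode, newsize, example_get_block);
 *	}
 */
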
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/dax.c - Direct Access filesystem code
   4 * Copyright (c) 2013-2014 Intel Corporation
   5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   7 */
   8
   9#include <linux/atomic.h>
  10#include <linux/blkdev.h>
  11#include <linux/buffer_head.h>
  12#include <linux/dax.h>
  13#include <linux/fs.h>
  14#include <linux/genhd.h>
  15#include <linux/highmem.h>
  16#include <linux/memcontrol.h>
  17#include <linux/mm.h>
  18#include <linux/mutex.h>
  19#include <linux/pagevec.h>
  20#include <linux/sched.h>
  21#include <linux/sched/signal.h>
  22#include <linux/uio.h>
  23#include <linux/vmstat.h>
  24#include <linux/pfn_t.h>
  25#include <linux/sizes.h>
  26#include <linux/mmu_notifier.h>
  27#include <linux/iomap.h>
  28#include <asm/pgalloc.h>
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/fs_dax.h>
  32
  33static inline unsigned int pe_order(enum page_entry_size pe_size)
  34{
  35	if (pe_size == PE_SIZE_PTE)
  36		return PAGE_SHIFT - PAGE_SHIFT;
  37	if (pe_size == PE_SIZE_PMD)
  38		return PMD_SHIFT - PAGE_SHIFT;
  39	if (pe_size == PE_SIZE_PUD)
  40		return PUD_SHIFT - PAGE_SHIFT;
  41	return ~0;
  42}
  43
  44/* We choose 4096 entries - same as per-zone page wait tables */
  45#define DAX_WAIT_TABLE_BITS 12
  46#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
  47
  48/* The 'colour' (ie low bits) within a PMD of a page offset.  */
  49#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
  50#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
  51
  52/* The order of a PMD entry */
  53#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
  54
  55static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
  56
  57static int __init init_dax_wait_table(void)
  58{
  59	int i;
  60
  61	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
  62		init_waitqueue_head(wait_table + i);
  63	return 0;
  64}
  65fs_initcall(init_dax_wait_table);
  66
  67/*
  68 * DAX pagecache entries use XArray value entries so they can't be mistaken
  69 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
  70 * and two more to tell us if the entry is a zero page or an empty entry that
  71 * is just used for locking.  In total four special bits.
  72 *
  73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
  74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
  75 * block allocation.
  76 */
  77#define DAX_SHIFT	(4)
  78#define DAX_LOCKED	(1UL << 0)
  79#define DAX_PMD		(1UL << 1)
  80#define DAX_ZERO_PAGE	(1UL << 2)
  81#define DAX_EMPTY	(1UL << 3)
  82
  83static unsigned long dax_to_pfn(void *entry)
  84{
  85	return xa_to_value(entry) >> DAX_SHIFT;
  86}
  87
  88static void *dax_make_entry(pfn_t pfn, unsigned long flags)
  89{
  90	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
  91}
  92
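/*
 * Worked example (illustrative): with DAX_SHIFT == 4, a PTE-sized entry for
 * pfn 0x1234 encodes the value 0x1234 << 4 == 0x12340 (wrapped via
 * xa_mk_value()); the corresponding PMD entry additionally has DAX_PMD set,
 * giving 0x12342.  dax_to_pfn() recovers the pfn by shifting the four flag
 * bits back out.
 */
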
  93static bool dax_is_locked(void *entry)
  94{
  95	return xa_to_value(entry) & DAX_LOCKED;
  96}
  97
  98static unsigned int dax_entry_order(void *entry)
  99{
 100	if (xa_to_value(entry) & DAX_PMD)
 101		return PMD_ORDER;
 102	return 0;
 103}
 104
 105static unsigned long dax_is_pmd_entry(void *entry)
 106{
 107	return xa_to_value(entry) & DAX_PMD;
 108}
 109
 110static bool dax_is_pte_entry(void *entry)
 111{
 112	return !(xa_to_value(entry) & DAX_PMD);
 113}
 114
 115static int dax_is_zero_entry(void *entry)
 116{
 117	return xa_to_value(entry) & DAX_ZERO_PAGE;
 118}
 119
 120static int dax_is_empty_entry(void *entry)
 121{
 122	return xa_to_value(entry) & DAX_EMPTY;
 123}
 124
 125/*
 126 * true if the entry that was found is of a smaller order than the entry
 127 * we were looking for
 128 */
 129static bool dax_is_conflict(void *entry)
 130{
 131	return entry == XA_RETRY_ENTRY;
 132}
 133
 134/*
 135 * DAX page cache entry locking
 136 */
 137struct exceptional_entry_key {
 138	struct xarray *xa;
 139	pgoff_t entry_start;
 140};
 141
 142struct wait_exceptional_entry_queue {
 143	wait_queue_entry_t wait;
 144	struct exceptional_entry_key key;
 145};
 146
 147static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
 148		void *entry, struct exceptional_entry_key *key)
 149{
 150	unsigned long hash;
 151	unsigned long index = xas->xa_index;
 152
 153	/*
 154	 * If 'entry' is a PMD, align the 'index' that we use for the wait
 155	 * queue to the start of that PMD.  This ensures that all offsets in
 156	 * the range covered by the PMD map to the same bit lock.
 157	 */
 158	if (dax_is_pmd_entry(entry))
 159		index &= ~PG_PMD_COLOUR;
 160	key->xa = xas->xa;
 161	key->entry_start = index;
 162
 163	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
 164	return wait_table + hash;
 165}
 166
 167static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
 168		unsigned int mode, int sync, void *keyp)
 169{
 170	struct exceptional_entry_key *key = keyp;
 171	struct wait_exceptional_entry_queue *ewait =
 172		container_of(wait, struct wait_exceptional_entry_queue, wait);
 173
 174	if (key->xa != ewait->key.xa ||
 175	    key->entry_start != ewait->key.entry_start)
 176		return 0;
 177	return autoremove_wake_function(wait, mode, sync, NULL);
 178}
 179
 180/*
 181 * @entry may no longer be the entry at the index in the mapping.
 182 * The important information it's conveying is whether the entry at
 183 * this index used to be a PMD entry.
 184 */
 185static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
 186{
 187	struct exceptional_entry_key key;
 188	wait_queue_head_t *wq;
 189
 190	wq = dax_entry_waitqueue(xas, entry, &key);
 191
 192	/*
 193	 * Checking for locked entry and prepare_to_wait_exclusive() happens
 194	 * under the i_pages lock, ditto for entry handling in our callers.
 195	 * So at this point all tasks that could have seen our entry locked
 196	 * must be in the waitqueue and the following check will see them.
 197	 */
 198	if (waitqueue_active(wq))
 199		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 200}
 201
 202/*
 203 * Look up entry in page cache, wait for it to become unlocked if it
 204 * is a DAX entry and return it.  The caller must subsequently call
 205 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 206 * if it did.  The entry returned may have a larger order than @order.
 207 * If @order is larger than the order of the entry found in i_pages, this
 208 * function returns a dax_is_conflict entry.
 209 *
 210 * Must be called with the i_pages lock held.
 211 */
 212static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 213{
 214	void *entry;
 215	struct wait_exceptional_entry_queue ewait;
 216	wait_queue_head_t *wq;
 217
 218	init_wait(&ewait.wait);
 219	ewait.wait.func = wake_exceptional_entry_func;
 220
 221	for (;;) {
 222		entry = xas_find_conflict(xas);
 223		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 224			return entry;
 225		if (dax_entry_order(entry) < order)
 226			return XA_RETRY_ENTRY;
 227		if (!dax_is_locked(entry))
 228			return entry;
 229
 230		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 231		prepare_to_wait_exclusive(wq, &ewait.wait,
 232					  TASK_UNINTERRUPTIBLE);
 233		xas_unlock_irq(xas);
 234		xas_reset(xas);
 235		schedule();
 236		finish_wait(wq, &ewait.wait);
 237		xas_lock_irq(xas);
 238	}
 239}
 240
 241/*
 242 * The only thing keeping the address space around is the i_pages lock
 243 * (it's cycled in clear_inode() after removing the entries from i_pages)
 244 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 245 */
 246static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 247{
 248	struct wait_exceptional_entry_queue ewait;
 249	wait_queue_head_t *wq;
 250
 251	init_wait(&ewait.wait);
 252	ewait.wait.func = wake_exceptional_entry_func;
 253
 254	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 255	/*
 256	 * Unlike get_unlocked_entry() there is no guarantee that this
 257	 * path ever successfully retrieves an unlocked entry before an
 258	 * inode dies. Perform a non-exclusive wait in case this path
 259	 * never successfully performs its own wake up.
 260	 */
 261	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 262	xas_unlock_irq(xas);
 263	schedule();
 264	finish_wait(wq, &ewait.wait);
 265}
 266
 267static void put_unlocked_entry(struct xa_state *xas, void *entry)
 268{
 269	/* If we were the only waiter woken, wake the next one */
 270	if (entry && !dax_is_conflict(entry))
 271		dax_wake_entry(xas, entry, false);
 272}
 273
 274/*
 275 * We used the xa_state to get the entry, but then we locked the entry and
 276 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 277 * before use.
 278 */
 279static void dax_unlock_entry(struct xa_state *xas, void *entry)
 280{
 281	void *old;
 282
 283	BUG_ON(dax_is_locked(entry));
 284	xas_reset(xas);
 285	xas_lock_irq(xas);
 286	old = xas_store(xas, entry);
 287	xas_unlock_irq(xas);
 288	BUG_ON(!dax_is_locked(old));
 289	dax_wake_entry(xas, entry, false);
 290}
 291
 292/*
 293 * Return: The entry stored at this location before it was locked.
 294 */
 295static void *dax_lock_entry(struct xa_state *xas, void *entry)
 296{
 297	unsigned long v = xa_to_value(entry);
 298	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
 299}
 300
 301static unsigned long dax_entry_size(void *entry)
 302{
 303	if (dax_is_zero_entry(entry))
 304		return 0;
 305	else if (dax_is_empty_entry(entry))
 306		return 0;
 307	else if (dax_is_pmd_entry(entry))
 308		return PMD_SIZE;
 309	else
 310		return PAGE_SIZE;
 311}
 312
 313static unsigned long dax_end_pfn(void *entry)
 314{
 315	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 316}
 317
 318/*
 319 * Iterate through all mapped pfns represented by an entry, i.e. skip
 320 * 'empty' and 'zero' entries.
 321 */
 322#define for_each_mapped_pfn(entry, pfn) \
 323	for (pfn = dax_to_pfn(entry); \
 324			pfn < dax_end_pfn(entry); pfn++)
 325
 326/*
 327 * TODO: for reflink+dax we need a way to associate a single page with
 328 * multiple address_space instances at different linear_page_index()
 329 * offsets.
 330 */
 331static void dax_associate_entry(void *entry, struct address_space *mapping,
 332		struct vm_area_struct *vma, unsigned long address)
 333{
 334	unsigned long size = dax_entry_size(entry), pfn, index;
 335	int i = 0;
 336
 337	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 338		return;
 339
 340	index = linear_page_index(vma, address & ~(size - 1));
 341	for_each_mapped_pfn(entry, pfn) {
 342		struct page *page = pfn_to_page(pfn);
 343
 344		WARN_ON_ONCE(page->mapping);
 345		page->mapping = mapping;
 346		page->index = index + i++;
 347	}
 348}
 349
 350static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 351		bool trunc)
 352{
 353	unsigned long pfn;
 354
 355	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 356		return;
 357
 358	for_each_mapped_pfn(entry, pfn) {
 359		struct page *page = pfn_to_page(pfn);
 360
 361		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
 362		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
 363		page->mapping = NULL;
 364		page->index = 0;
 365	}
 366}
 367
 368static struct page *dax_busy_page(void *entry)
 369{
 370	unsigned long pfn;
 371
 372	for_each_mapped_pfn(entry, pfn) {
 373		struct page *page = pfn_to_page(pfn);
 374
 375		if (page_ref_count(page) > 1)
 376			return page;
 377	}
 378	return NULL;
 379}
 380
 381/*
  382 * dax_lock_page - Lock the DAX entry corresponding to a page
 383 * @page: The page whose entry we want to lock
 384 *
 385 * Context: Process context.
 386 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 387 * not be locked.
 388 */
 389dax_entry_t dax_lock_page(struct page *page)
 390{
 391	XA_STATE(xas, NULL, 0);
 392	void *entry;
 393
 394	/* Ensure page->mapping isn't freed while we look at it */
 395	rcu_read_lock();
 396	for (;;) {
 397		struct address_space *mapping = READ_ONCE(page->mapping);
 398
 399		entry = NULL;
 400		if (!mapping || !dax_mapping(mapping))
 401			break;
 402
 403		/*
 404		 * In the device-dax case there's no need to lock, a
 405		 * struct dev_pagemap pin is sufficient to keep the
 406		 * inode alive, and we assume we have dev_pagemap pin
 407		 * otherwise we would not have a valid pfn_to_page()
 408		 * translation.
 409		 */
 410		entry = (void *)~0UL;
 411		if (S_ISCHR(mapping->host->i_mode))
 412			break;
 413
 414		xas.xa = &mapping->i_pages;
 415		xas_lock_irq(&xas);
 416		if (mapping != page->mapping) {
 417			xas_unlock_irq(&xas);
 418			continue;
 419		}
 420		xas_set(&xas, page->index);
 421		entry = xas_load(&xas);
 422		if (dax_is_locked(entry)) {
 423			rcu_read_unlock();
 424			wait_entry_unlocked(&xas, entry);
 425			rcu_read_lock();
 426			continue;
 427		}
 428		dax_lock_entry(&xas, entry);
 429		xas_unlock_irq(&xas);
 430		break;
 431	}
 432	rcu_read_unlock();
 433	return (dax_entry_t)entry;
 434}
 435
 436void dax_unlock_page(struct page *page, dax_entry_t cookie)
 437{
 438	struct address_space *mapping = page->mapping;
 439	XA_STATE(xas, &mapping->i_pages, page->index);
 440
 441	if (S_ISCHR(mapping->host->i_mode))
 442		return;
 443
 444	dax_unlock_entry(&xas, (void *)cookie);
 445}
 446
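/*
 * Example (sketch only): the memory-failure path is the main user of this
 * pair; it pins the entry while it inspects the poisoned page, roughly as
 * below ("handle_poisoned_page" is a placeholder for that work):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (cookie) {
 *		handle_poisoned_page(page);
 *		dax_unlock_page(page, cookie);
 *	}
 */
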
 447/*
 448 * Find page cache entry at given index. If it is a DAX entry, return it
 449 * with the entry locked. If the page cache doesn't contain an entry at
 450 * that index, add a locked empty entry.
 451 *
 452 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 453 * either return that locked entry or will return VM_FAULT_FALLBACK.
 454 * This will happen if there are any PTE entries within the PMD range
 455 * that we are requesting.
 456 *
 457 * We always favor PTE entries over PMD entries. There isn't a flow where we
 458 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 459 * insertion will fail if it finds any PTE entries already in the tree, and a
 460 * PTE insertion will cause an existing PMD entry to be unmapped and
 461 * downgraded to PTE entries.  This happens for both PMD zero pages as
 462 * well as PMD empty entries.
 463 *
 464 * The exception to this downgrade path is for PMD entries that have
 465 * real storage backing them.  We will leave these real PMD entries in
 466 * the tree, and PTE writes will simply dirty the entire PMD entry.
 467 *
 468 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 469 * persistent memory the benefit is doubtful. We can add that later if we can
 470 * show it helps.
 471 *
 472 * On error, this function does not return an ERR_PTR.  Instead it returns
 473 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 474 * overlap with xarray value entries.
 475 */
 476static void *grab_mapping_entry(struct xa_state *xas,
 477		struct address_space *mapping, unsigned int order)
 478{
 479	unsigned long index = xas->xa_index;
 480	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
 481	void *entry;
 482
 483retry:
 484	xas_lock_irq(xas);
 485	entry = get_unlocked_entry(xas, order);
 486
 487	if (entry) {
 488		if (dax_is_conflict(entry))
 489			goto fallback;
 490		if (!xa_is_value(entry)) {
 491			xas_set_err(xas, -EIO);
 492			goto out_unlock;
 493		}
 494
 495		if (order == 0) {
 496			if (dax_is_pmd_entry(entry) &&
 497			    (dax_is_zero_entry(entry) ||
 498			     dax_is_empty_entry(entry))) {
 499				pmd_downgrade = true;
 500			}
 501		}
 502	}
 503
 504	if (pmd_downgrade) {
 505		/*
 506		 * Make sure 'entry' remains valid while we drop
 507		 * the i_pages lock.
 508		 */
 509		dax_lock_entry(xas, entry);
 510
 511		/*
 512		 * Besides huge zero pages the only other thing that gets
 513		 * downgraded are empty entries which don't need to be
 514		 * unmapped.
 515		 */
 516		if (dax_is_zero_entry(entry)) {
 517			xas_unlock_irq(xas);
 518			unmap_mapping_pages(mapping,
 519					xas->xa_index & ~PG_PMD_COLOUR,
 520					PG_PMD_NR, false);
 521			xas_reset(xas);
 522			xas_lock_irq(xas);
 523		}
 524
 525		dax_disassociate_entry(entry, mapping, false);
 526		xas_store(xas, NULL);	/* undo the PMD join */
 527		dax_wake_entry(xas, entry, true);
 528		mapping->nrexceptional--;
 529		entry = NULL;
 530		xas_set(xas, index);
 531	}
 532
 533	if (entry) {
 534		dax_lock_entry(xas, entry);
 535	} else {
 536		unsigned long flags = DAX_EMPTY;
 537
 538		if (order > 0)
 539			flags |= DAX_PMD;
 540		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 541		dax_lock_entry(xas, entry);
 542		if (xas_error(xas))
 543			goto out_unlock;
 544		mapping->nrexceptional++;
 545	}
 546
 547out_unlock:
 548	xas_unlock_irq(xas);
 549	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 550		goto retry;
 551	if (xas->xa_node == XA_ERROR(-ENOMEM))
 552		return xa_mk_internal(VM_FAULT_OOM);
 553	if (xas_error(xas))
 554		return xa_mk_internal(VM_FAULT_SIGBUS);
 555	return entry;
 556fallback:
 557	xas_unlock_irq(xas);
 558	return xa_mk_internal(VM_FAULT_FALLBACK);
 559}
 560
 561/**
 562 * dax_layout_busy_page - find first pinned page in @mapping
 563 * @mapping: address space to scan for a page with ref count > 1
 564 *
 565 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 566 * 'onlined' to the page allocator so they are considered idle when
 567 * page->count == 1. A filesystem uses this interface to determine if
 568 * any page in the mapping is busy, i.e. for DMA, or other
 569 * get_user_pages() usages.
 570 *
 571 * It is expected that the filesystem is holding locks to block the
 572 * establishment of new mappings in this address_space. I.e. it expects
 573 * to be able to run unmap_mapping_range() and subsequently not race
 574 * mapping_mapped() becoming true.
 575 */
 576struct page *dax_layout_busy_page(struct address_space *mapping)
 577{
 578	XA_STATE(xas, &mapping->i_pages, 0);
 579	void *entry;
 580	unsigned int scanned = 0;
 581	struct page *page = NULL;
 582
 583	/*
 584	 * In the 'limited' case get_user_pages() for dax is disabled.
 585	 */
 586	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 587		return NULL;
 588
 589	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 590		return NULL;
 591
 592	/*
 593	 * If we race get_user_pages_fast() here either we'll see the
 594	 * elevated page count in the iteration and wait, or
 595	 * get_user_pages_fast() will see that the page it took a reference
 596	 * against is no longer mapped in the page tables and bail to the
 597	 * get_user_pages() slow path.  The slow path is protected by
 598	 * pte_lock() and pmd_lock(). New references are not taken without
 599	 * holding those locks, and unmap_mapping_range() will not zero the
 600	 * pte or pmd without holding the respective lock, so we are
 601	 * guaranteed to either see new references or prevent new
 602	 * references from being established.
 603	 */
 604	unmap_mapping_range(mapping, 0, 0, 0);
 605
 606	xas_lock_irq(&xas);
 607	xas_for_each(&xas, entry, ULONG_MAX) {
 608		if (WARN_ON_ONCE(!xa_is_value(entry)))
 609			continue;
 610		if (unlikely(dax_is_locked(entry)))
 611			entry = get_unlocked_entry(&xas, 0);
 612		if (entry)
 613			page = dax_busy_page(entry);
 614		put_unlocked_entry(&xas, entry);
 615		if (page)
 616			break;
 617		if (++scanned % XA_CHECK_SCHED)
 618			continue;
 619
 620		xas_pause(&xas);
 621		xas_unlock_irq(&xas);
 622		cond_resched();
 623		xas_lock_irq(&xas);
 624	}
 625	xas_unlock_irq(&xas);
 626	return page;
 627}
 628EXPORT_SYMBOL_GPL(dax_layout_busy_page);
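/*
 * Editor's sketch (not part of the original file): a filesystem typically
 * pairs dax_layout_busy_page() with a refcount wait before truncate or hole
 * punch.  my_break_dax_layouts() is hypothetical; XFS implements this pattern
 * in xfs_break_dax_layouts(), where the wait also drops and retakes the
 * fs-private locks around schedule().
 */
static int my_break_dax_layouts(struct inode *inode, bool *retry)
{
	struct page *page = dax_layout_busy_page(inode->i_mapping);

	if (!page)
		return 0;	/* nothing pinned by get_user_pages()/DMA */

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, schedule());
}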
 629
 630static int __dax_invalidate_entry(struct address_space *mapping,
 631					  pgoff_t index, bool trunc)
 632{
 633	XA_STATE(xas, &mapping->i_pages, index);
 634	int ret = 0;
 635	void *entry;
 636
 637	xas_lock_irq(&xas);
 638	entry = get_unlocked_entry(&xas, 0);
 639	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 640		goto out;
 641	if (!trunc &&
 642	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 643	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 644		goto out;
 645	dax_disassociate_entry(entry, mapping, trunc);
 646	xas_store(&xas, NULL);
 647	mapping->nrexceptional--;
 648	ret = 1;
 649out:
 650	put_unlocked_entry(&xas, entry);
 651	xas_unlock_irq(&xas);
 652	return ret;
 653}
 654
 655/*
 656 * Delete DAX entry at @index from @mapping.  Wait for it
 657 * to be unlocked before deleting it.
 658 */
 659int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 660{
 661	int ret = __dax_invalidate_entry(mapping, index, true);
 662
 663	/*
 664	 * This gets called from truncate / punch_hole path. As such, the caller
 665	 * must hold locks protecting against concurrent modifications of the
 666	 * page cache (usually fs-private i_mmap_sem for writing). Since the
 667	 * caller has seen a DAX entry for this index, we better find it
 668	 * at that index as well...
 669	 */
 670	WARN_ON_ONCE(!ret);
 671	return ret;
 672}
 673
 674/*
 675 * Invalidate DAX entry if it is clean.
 676 */
 677int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 678				      pgoff_t index)
 679{
 680	return __dax_invalidate_entry(mapping, index, false);
 681}
 682
 683static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
 684			     sector_t sector, struct page *to, unsigned long vaddr)
 685{
 686	void *vto, *kaddr;
 687	pgoff_t pgoff;
 688	long rc;
 689	int id;
 690
 691	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 692	if (rc)
 693		return rc;
 694
 695	id = dax_read_lock();
 696	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
 697	if (rc < 0) {
 698		dax_read_unlock(id);
 699		return rc;
 700	}
 701	vto = kmap_atomic(to);
 702	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
 703	kunmap_atomic(vto);
 704	dax_read_unlock(id);
 705	return 0;
 706}
 707
 708/*
 709 * By this point grab_mapping_entry() has ensured that we have a locked entry
 710 * of the appropriate size so we don't have to worry about downgrading PMDs to
 711 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 712 * already in the tree, we will skip the insertion and just dirty the PMD as
 713 * appropriate.
 714 */
 715static void *dax_insert_entry(struct xa_state *xas,
 716		struct address_space *mapping, struct vm_fault *vmf,
 717		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
 718{
 719	void *new_entry = dax_make_entry(pfn, flags);
 720
 721	if (dirty)
 722		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 723
 724	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
 725		unsigned long index = xas->xa_index;
 726		/* we are replacing a zero page with block mapping */
 727		if (dax_is_pmd_entry(entry))
 728			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
 729					PG_PMD_NR, false);
 730		else /* pte entry */
 731			unmap_mapping_pages(mapping, index, 1, false);
 732	}
 733
 734	xas_reset(xas);
 735	xas_lock_irq(xas);
 736	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 737		void *old;
 738
 739		dax_disassociate_entry(entry, mapping, false);
 740		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
 741		/*
 742		 * Only swap our new entry into the page cache if the current
 743		 * entry is a zero page or an empty entry.  If a normal PTE or
 744		 * PMD entry is already in the cache, we leave it alone.  This
 745		 * means that if we are trying to insert a PTE and the
 746		 * existing entry is a PMD, we will just leave the PMD in the
 747		 * tree and dirty it if necessary.
 748		 */
 749		old = dax_lock_entry(xas, new_entry);
 750		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
 751					DAX_LOCKED));
 752		entry = new_entry;
 753	} else {
 754		xas_load(xas);	/* Walk the xa_state */
 755	}
 756
 757	if (dirty)
 758		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 759
 760	xas_unlock_irq(xas);
 761	return entry;
 762}
 763
 764static inline
 765unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 766{
 767	unsigned long address;
 768
 769	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 770	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 771	return address;
 772}
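/*
 * Editor's note: for example, a VMA with vm_start == 0x7f0000000000 and
 * vm_pgoff == 0x10 maps file page 0x13 at
 * 0x7f0000000000 + (0x13 - 0x10) * PAGE_SIZE == 0x7f0000003000 (4K pages).
 */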
 773
 774/* Walk all mappings of a given index of a file and writeprotect them */
 775static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 776		unsigned long pfn)
 777{
 778	struct vm_area_struct *vma;
 779	pte_t pte, *ptep = NULL;
 780	pmd_t *pmdp = NULL;
 781	spinlock_t *ptl;
 782
 783	i_mmap_lock_read(mapping);
 784	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
 785		struct mmu_notifier_range range;
 786		unsigned long address;
 787
 788		cond_resched();
 789
 790		if (!(vma->vm_flags & VM_SHARED))
 791			continue;
 792
 793		address = pgoff_address(index, vma);
 794
 795		/*
 796		 * Note because we provide range to follow_pte_pmd it will
 797		 * call mmu_notifier_invalidate_range_start() on our behalf
 798		 * before taking any lock.
 799		 */
 800		if (follow_pte_pmd(vma->vm_mm, address, &range,
 801				   &ptep, &pmdp, &ptl))
 802			continue;
 803
 804		/*
 805		 * No need to call mmu_notifier_invalidate_range() as we are
 806		 * downgrading page table protection not changing it to point
 807		 * to a new page.
 808		 *
 809		 * See Documentation/vm/mmu_notifier.rst
 810		 */
 811		if (pmdp) {
 812#ifdef CONFIG_FS_DAX_PMD
 813			pmd_t pmd;
 814
 815			if (pfn != pmd_pfn(*pmdp))
 816				goto unlock_pmd;
 817			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
 818				goto unlock_pmd;
 819
 820			flush_cache_page(vma, address, pfn);
 821			pmd = pmdp_invalidate(vma, address, pmdp);
 822			pmd = pmd_wrprotect(pmd);
 823			pmd = pmd_mkclean(pmd);
 824			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 825unlock_pmd:
 826#endif
 827			spin_unlock(ptl);
 828		} else {
 829			if (pfn != pte_pfn(*ptep))
 830				goto unlock_pte;
 831			if (!pte_dirty(*ptep) && !pte_write(*ptep))
 832				goto unlock_pte;
 833
 834			flush_cache_page(vma, address, pfn);
 835			pte = ptep_clear_flush(vma, address, ptep);
 836			pte = pte_wrprotect(pte);
 837			pte = pte_mkclean(pte);
 838			set_pte_at(vma->vm_mm, address, ptep, pte);
 839unlock_pte:
 840			pte_unmap_unlock(ptep, ptl);
 841		}
 842
 843		mmu_notifier_invalidate_range_end(&range);
 844	}
 845	i_mmap_unlock_read(mapping);
 846}
 847
 848static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 849		struct address_space *mapping, void *entry)
 850{
 851	unsigned long pfn, index, count;
 852	long ret = 0;
 853
 854	/*
 855	 * A page got tagged dirty in DAX mapping? Something is seriously
 856	 * wrong.
 857	 */
 858	if (WARN_ON(!xa_is_value(entry)))
 859		return -EIO;
 860
 861	if (unlikely(dax_is_locked(entry))) {
 862		void *old_entry = entry;
 863
 864		entry = get_unlocked_entry(xas, 0);
 865
 866		/* Entry got punched out / reallocated? */
 867		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 868			goto put_unlocked;
 869		/*
 870		 * Entry got reallocated elsewhere? No need to writeback.
 871		 * We have to compare pfns as we must not bail out due to
 872		 * difference in lockbit or entry type.
 873		 */
 874		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
 875			goto put_unlocked;
 876		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 877					dax_is_zero_entry(entry))) {
 878			ret = -EIO;
 879			goto put_unlocked;
 880		}
 881
 882		/* Another fsync thread may have already done this entry */
 883		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
 884			goto put_unlocked;
 885	}
 886
 887	/* Lock the entry to serialize with page faults */
 888	dax_lock_entry(xas, entry);
 889
 890	/*
 891	 * We can clear the tag now but we have to be careful so that concurrent
 892	 * dax_writeback_one() calls for the same index cannot finish before we
 893	 * actually flush the caches. This is achieved as the calls will look
 894	 * at the entry only under the i_pages lock and once they do that
 895	 * they will see the entry locked and wait for it to unlock.
 896	 */
 897	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
 898	xas_unlock_irq(xas);
 899
 900	/*
 901	 * If dax_writeback_mapping_range() was given a wbc->range_start
 902	 * in the middle of a PMD, the 'index' we use needs to be
 903	 * aligned to the start of the PMD.
 904	 * This allows us to flush for PMD_SIZE and not have to worry about
 905	 * partial PMD writebacks.
 906	 */
 907	pfn = dax_to_pfn(entry);
 908	count = 1UL << dax_entry_order(entry);
 909	index = xas->xa_index & ~(count - 1);
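	/*
	 * Editor's note: e.g. for a PMD entry on x86-64 dax_entry_order() is 9,
	 * so count == 512 and an xa_index of 0x213 rounds down to index 0x200;
	 * the whole 2MiB range is then flushed in one go.
	 */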
 910
 911	dax_entry_mkclean(mapping, index, pfn);
 912	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
 913	/*
 914	 * After we have flushed the cache, we can clear the dirty tag. There
 915	 * cannot be new dirty data in the pfn after the flush has completed as
 916	 * the pfn mappings are writeprotected and fault waits for mapping
 917	 * entry lock.
 918	 */
 919	xas_reset(xas);
 920	xas_lock_irq(xas);
 921	xas_store(xas, entry);
 922	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
 923	dax_wake_entry(xas, entry, false);
 924
 925	trace_dax_writeback_one(mapping->host, index, count);
 926	return ret;
 927
 928 put_unlocked:
 929	put_unlocked_entry(xas, entry);
 930	return ret;
 931}
 932
 933/*
 934 * Flush the mapping to the persistent domain within the byte range of [start,
 935 * end]. This is required by data integrity operations to ensure file data is
 936 * on persistent storage prior to completion of the operation.
 937 */
 938int dax_writeback_mapping_range(struct address_space *mapping,
 939		struct dax_device *dax_dev, struct writeback_control *wbc)
 940{
 941	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
 942	struct inode *inode = mapping->host;
 943	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
 944	void *entry;
 945	int ret = 0;
 946	unsigned int scanned = 0;
 947
 948	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 949		return -EIO;
 950
 951	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 952		return 0;
 953
 954	trace_dax_writeback_range(inode, xas.xa_index, end_index);
 955
 956	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
 957
 958	xas_lock_irq(&xas);
 959	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
 960		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
 961		if (ret < 0) {
 962			mapping_set_error(mapping, ret);
 963			break;
 964		}
 965		if (++scanned % XA_CHECK_SCHED)
 966			continue;
 967
 968		xas_pause(&xas);
 969		xas_unlock_irq(&xas);
 970		cond_resched();
 971		xas_lock_irq(&xas);
 972	}
 973	xas_unlock_irq(&xas);
 974	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
 975	return ret;
 976}
 977EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
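/*
 * Editor's sketch (not part of the original file): a DAX filesystem normally
 * calls this from its ->writepages method so that fsync()/msync() end up
 * here.  my_dax_writepages() and struct my_sb_info are hypothetical; ext4 and
 * XFS follow the same pattern with their own superblock-private dax_device.
 */
static int my_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct my_sb_info *sbi = mapping->host->i_sb->s_fs_info;

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}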
 978
 979static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 980{
 981	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 982}
 983
 984static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 985			 pfn_t *pfnp)
 986{
 987	const sector_t sector = dax_iomap_sector(iomap, pos);
 988	pgoff_t pgoff;
 989	int id, rc;
 990	long length;
 991
 992	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 993	if (rc)
 994		return rc;
 995	id = dax_read_lock();
 996	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
 997				   NULL, pfnp);
 998	if (length < 0) {
 999		rc = length;
1000		goto out;
1001	}
1002	rc = -EINVAL;
1003	if (PFN_PHYS(length) < size)
1004		goto out;
1005	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1006		goto out;
1007	/* For larger pages we need devmap */
1008	if (length > 1 && !pfn_t_devmap(*pfnp))
1009		goto out;
1010	rc = 0;
1011out:
1012	dax_read_unlock(id);
1013	return rc;
1014}
1015
1016/*
1017 * The user has performed a load from a hole in the file.  Allocating a new
1018 * page in the file would cause excessive storage usage for workloads with
1019 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1020 * If this page is ever written to we will re-fault and change the mapping to
1021 * point to real DAX storage instead.
1022 */
1023static vm_fault_t dax_load_hole(struct xa_state *xas,
1024		struct address_space *mapping, void **entry,
1025		struct vm_fault *vmf)
1026{
1027	struct inode *inode = mapping->host;
1028	unsigned long vaddr = vmf->address;
1029	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1030	vm_fault_t ret;
1031
1032	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1033			DAX_ZERO_PAGE, false);
1034
1035	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1036	trace_dax_load_hole(inode, vmf, ret);
1037	return ret;
1038}
1039
1040int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
1041		   struct iomap *iomap)
1042{
1043	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
1044	pgoff_t pgoff;
1045	long rc, id;
1046	void *kaddr;
1047	bool page_aligned = false;
1048
1049
1050	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
1051	    IS_ALIGNED(size, PAGE_SIZE))
1052		page_aligned = true;
1053
1054	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
1055	if (rc)
1056		return rc;
1057
1058	id = dax_read_lock();
1059
1060	if (page_aligned)
1061		rc = dax_zero_page_range(iomap->dax_dev, pgoff,
1062					 size >> PAGE_SHIFT);
1063	else
1064		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
1065	if (rc < 0) {
1066		dax_read_unlock(id);
1067		return rc;
1068	}
1069
1070	if (!page_aligned) {
1071		memset(kaddr + offset, 0, size);
1072		dax_flush(iomap->dax_dev, kaddr + offset, size);
1073	}
1074	dax_read_unlock(id);
1075	return 0;
1076}
1077
1078static loff_t
1079dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1080		struct iomap *iomap, struct iomap *srcmap)
1081{
1082	struct block_device *bdev = iomap->bdev;
1083	struct dax_device *dax_dev = iomap->dax_dev;
1084	struct iov_iter *iter = data;
1085	loff_t end = pos + length, done = 0;
1086	ssize_t ret = 0;
1087	size_t xfer;
1088	int id;
1089
1090	if (iov_iter_rw(iter) == READ) {
1091		end = min(end, i_size_read(inode));
1092		if (pos >= end)
1093			return 0;
1094
1095		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1096			return iov_iter_zero(min(length, end - pos), iter);
1097	}
1098
1099	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1100		return -EIO;
1101
1102	/*
1103	 * A write can allocate a block for an area which has a hole page mapped
1104	 * into the page tables. We have to tear down these mappings so that data
1105	 * written by write(2) is visible via mmap.
1106	 */
1107	if (iomap->flags & IOMAP_F_NEW) {
1108		invalidate_inode_pages2_range(inode->i_mapping,
1109					      pos >> PAGE_SHIFT,
1110					      (end - 1) >> PAGE_SHIFT);
1111	}
1112
1113	id = dax_read_lock();
1114	while (pos < end) {
1115		unsigned offset = pos & (PAGE_SIZE - 1);
1116		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1117		const sector_t sector = dax_iomap_sector(iomap, pos);
1118		ssize_t map_len;
1119		pgoff_t pgoff;
1120		void *kaddr;
1121
1122		if (fatal_signal_pending(current)) {
1123			ret = -EINTR;
1124			break;
1125		}
1126
1127		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1128		if (ret)
1129			break;
1130
1131		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1132				&kaddr, NULL);
1133		if (map_len < 0) {
1134			ret = map_len;
1135			break;
1136		}
1137
1138		map_len = PFN_PHYS(map_len);
1139		kaddr += offset;
1140		map_len -= offset;
1141		if (map_len > end - pos)
1142			map_len = end - pos;
1143
1144		/*
1145		 * The userspace address for the memory copy has already been
1146		 * validated via access_ok() in either vfs_read() or
1147		 * vfs_write(), depending on which operation we are doing.
1148		 */
1149		if (iov_iter_rw(iter) == WRITE)
1150			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1151					map_len, iter);
1152		else
1153			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1154					map_len, iter);
1155
1156		pos += xfer;
1157		length -= xfer;
1158		done += xfer;
1159
1160		if (xfer == 0)
1161			ret = -EFAULT;
1162		if (xfer < map_len)
1163			break;
1164	}
1165	dax_read_unlock(id);
1166
1167	return done ? done : ret;
1168}
1169
1170/**
1171 * dax_iomap_rw - Perform I/O to a DAX file
1172 * @iocb:	The control block for this I/O
1173 * @iter:	The addresses to do I/O from or to
1174 * @ops:	iomap ops passed from the file system
1175 *
1176 * This function performs read and write operations to directly mapped
1177 * persistent memory.  The caller needs to take care of read/write exclusion
1178 * and evicting any page cache pages in the region under I/O.
1179 */
1180ssize_t
1181dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1182		const struct iomap_ops *ops)
1183{
1184	struct address_space *mapping = iocb->ki_filp->f_mapping;
1185	struct inode *inode = mapping->host;
1186	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1187	unsigned flags = 0;
1188
1189	if (iov_iter_rw(iter) == WRITE) {
1190		lockdep_assert_held_write(&inode->i_rwsem);
1191		flags |= IOMAP_WRITE;
1192	} else {
1193		lockdep_assert_held(&inode->i_rwsem);
1194	}
1195
1196	if (iocb->ki_flags & IOCB_NOWAIT)
1197		flags |= IOMAP_NOWAIT;
1198
1199	while (iov_iter_count(iter)) {
1200		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1201				iter, dax_iomap_actor);
1202		if (ret <= 0)
1203			break;
1204		pos += ret;
1205		done += ret;
1206	}
1207
1208	iocb->ki_pos += done;
1209	return done ? done : ret;
1210}
1211EXPORT_SYMBOL_GPL(dax_iomap_rw);
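/*
 * Editor's sketch (not part of the original file): a minimal ->read_iter for
 * a DAX file.  my_iomap_ops is a hypothetical iomap_ops supplied by the
 * filesystem; holding i_rwsem shared satisfies the lockdep assertion above.
 */
static ssize_t my_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* skip atime update for zero-length reads */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &my_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}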
1212
1213static vm_fault_t dax_fault_return(int error)
1214{
1215	if (error == 0)
1216		return VM_FAULT_NOPAGE;
1217	return vmf_error(error);
1218}
1219
1220/*
1221 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1222 * flushed on write-faults (non-cow), but not read-faults.
1223 */
1224static bool dax_fault_is_synchronous(unsigned long flags,
1225		struct vm_area_struct *vma, struct iomap *iomap)
1226{
1227	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1228		&& (iomap->flags & IOMAP_F_DIRTY);
1229}
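/*
 * Editor's note (not part of the original file): the userspace side of this
 * contract.  MAP_SYNC is only honoured together with MAP_SHARED_VALIDATE, so
 * the kernel can reject it when the mapping cannot provide the guarantee:
 *
 *	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * After a write fault completes on such a mapping, the metadata needed to
 * reach the faulted range is durable without a further fsync().
 */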
1230
1231static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1232			       int *iomap_errp, const struct iomap_ops *ops)
1233{
1234	struct vm_area_struct *vma = vmf->vma;
1235	struct address_space *mapping = vma->vm_file->f_mapping;
1236	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1237	struct inode *inode = mapping->host;
1238	unsigned long vaddr = vmf->address;
1239	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1240	struct iomap iomap = { .type = IOMAP_HOLE };
1241	struct iomap srcmap = { .type = IOMAP_HOLE };
1242	unsigned flags = IOMAP_FAULT;
1243	int error, major = 0;
1244	bool write = vmf->flags & FAULT_FLAG_WRITE;
1245	bool sync;
1246	vm_fault_t ret = 0;
1247	void *entry;
1248	pfn_t pfn;
1249
1250	trace_dax_pte_fault(inode, vmf, ret);
1251	/*
1252	 * Check whether offset isn't beyond end of file now. Caller is supposed
1253	 * to hold locks serializing us with truncate / punch hole so this is
1254	 * a reliable test.
1255	 */
1256	if (pos >= i_size_read(inode)) {
1257		ret = VM_FAULT_SIGBUS;
1258		goto out;
1259	}
1260
1261	if (write && !vmf->cow_page)
1262		flags |= IOMAP_WRITE;
1263
1264	entry = grab_mapping_entry(&xas, mapping, 0);
1265	if (xa_is_internal(entry)) {
1266		ret = xa_to_internal(entry);
1267		goto out;
1268	}
1269
1270	/*
1271	 * It is possible, particularly with mixed reads & writes to private
1272	 * mappings, that we have raced with a PMD fault that overlaps with
1273	 * the PTE we need to set up.  If so just return and the fault will be
1274	 * retried.
1275	 */
1276	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1277		ret = VM_FAULT_NOPAGE;
1278		goto unlock_entry;
1279	}
1280
1281	/*
1282	 * Note that we don't bother to use iomap_apply here: DAX requires
1283	 * the file system block size to be equal to the page size, which means
1284	 * that we never have to deal with more than a single extent here.
1285	 */
1286	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
1287	if (iomap_errp)
1288		*iomap_errp = error;
1289	if (error) {
1290		ret = dax_fault_return(error);
1291		goto unlock_entry;
1292	}
1293	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1294		error = -EIO;	/* fs corruption? */
1295		goto error_finish_iomap;
1296	}
1297
1298	if (vmf->cow_page) {
1299		sector_t sector = dax_iomap_sector(&iomap, pos);
1300
1301		switch (iomap.type) {
1302		case IOMAP_HOLE:
1303		case IOMAP_UNWRITTEN:
1304			clear_user_highpage(vmf->cow_page, vaddr);
1305			break;
1306		case IOMAP_MAPPED:
1307			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
1308						  sector, vmf->cow_page, vaddr);
1309			break;
1310		default:
1311			WARN_ON_ONCE(1);
1312			error = -EIO;
1313			break;
1314		}
1315
1316		if (error)
1317			goto error_finish_iomap;
1318
1319		__SetPageUptodate(vmf->cow_page);
1320		ret = finish_fault(vmf);
1321		if (!ret)
1322			ret = VM_FAULT_DONE_COW;
1323		goto finish_iomap;
1324	}
1325
1326	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1327
1328	switch (iomap.type) {
1329	case IOMAP_MAPPED:
1330		if (iomap.flags & IOMAP_F_NEW) {
1331			count_vm_event(PGMAJFAULT);
1332			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1333			major = VM_FAULT_MAJOR;
1334		}
1335		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1336		if (error < 0)
1337			goto error_finish_iomap;
1338
1339		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1340						 0, write && !sync);
1341
1342		/*
1343		 * If we are doing a synchronous page fault and the inode needs fsync,
1344		 * we can insert the PTE into the page tables only after that happens.
1345		 * Skip insertion for now and return the pfn so that the caller can
1346		 * insert it after fsync is done.
1347		 */
1348		if (sync) {
1349			if (WARN_ON_ONCE(!pfnp)) {
1350				error = -EIO;
1351				goto error_finish_iomap;
1352			}
1353			*pfnp = pfn;
1354			ret = VM_FAULT_NEEDDSYNC | major;
1355			goto finish_iomap;
1356		}
1357		trace_dax_insert_mapping(inode, vmf, entry);
1358		if (write)
1359			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1360		else
1361			ret = vmf_insert_mixed(vma, vaddr, pfn);
1362
1363		goto finish_iomap;
1364	case IOMAP_UNWRITTEN:
1365	case IOMAP_HOLE:
1366		if (!write) {
1367			ret = dax_load_hole(&xas, mapping, &entry, vmf);
1368			goto finish_iomap;
1369		}
1370		fallthrough;
1371	default:
1372		WARN_ON_ONCE(1);
1373		error = -EIO;
1374		break;
1375	}
1376
1377 error_finish_iomap:
1378	ret = dax_fault_return(error);
1379 finish_iomap:
1380	if (ops->iomap_end) {
1381		int copied = PAGE_SIZE;
1382
1383		if (ret & VM_FAULT_ERROR)
1384			copied = 0;
1385		/*
1386		 * The fault is done by now and there's no way back (other
1387		 * thread may be already happily using PTE we have installed).
1388		 * Just ignore error from ->iomap_end since we cannot do much
1389		 * with it.
1390		 */
1391		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1392	}
1393 unlock_entry:
1394	dax_unlock_entry(&xas, entry);
1395 out:
1396	trace_dax_pte_fault_done(inode, vmf, ret);
1397	return ret | major;
1398}
1399
1400#ifdef CONFIG_FS_DAX_PMD
1401static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1402		struct iomap *iomap, void **entry)
1403{
1404	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1405	unsigned long pmd_addr = vmf->address & PMD_MASK;
1406	struct vm_area_struct *vma = vmf->vma;
1407	struct inode *inode = mapping->host;
1408	pgtable_t pgtable = NULL;
1409	struct page *zero_page;
1410	spinlock_t *ptl;
1411	pmd_t pmd_entry;
1412	pfn_t pfn;
1413
1414	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1415
1416	if (unlikely(!zero_page))
1417		goto fallback;
1418
1419	pfn = page_to_pfn_t(zero_page);
1420	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1421			DAX_PMD | DAX_ZERO_PAGE, false);
1422
1423	if (arch_needs_pgtable_deposit()) {
1424		pgtable = pte_alloc_one(vma->vm_mm);
1425		if (!pgtable)
1426			return VM_FAULT_OOM;
1427	}
1428
1429	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1430	if (!pmd_none(*(vmf->pmd))) {
1431		spin_unlock(ptl);
1432		goto fallback;
1433	}
1434
1435	if (pgtable) {
1436		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1437		mm_inc_nr_ptes(vma->vm_mm);
1438	}
1439	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1440	pmd_entry = pmd_mkhuge(pmd_entry);
1441	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1442	spin_unlock(ptl);
1443	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1444	return VM_FAULT_NOPAGE;
1445
1446fallback:
1447	if (pgtable)
1448		pte_free(vma->vm_mm, pgtable);
1449	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1450	return VM_FAULT_FALLBACK;
1451}
1452
1453static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1454			       const struct iomap_ops *ops)
1455{
1456	struct vm_area_struct *vma = vmf->vma;
1457	struct address_space *mapping = vma->vm_file->f_mapping;
1458	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1459	unsigned long pmd_addr = vmf->address & PMD_MASK;
1460	bool write = vmf->flags & FAULT_FLAG_WRITE;
1461	bool sync;
1462	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1463	struct inode *inode = mapping->host;
1464	vm_fault_t result = VM_FAULT_FALLBACK;
1465	struct iomap iomap = { .type = IOMAP_HOLE };
1466	struct iomap srcmap = { .type = IOMAP_HOLE };
1467	pgoff_t max_pgoff;
1468	void *entry;
1469	loff_t pos;
1470	int error;
1471	pfn_t pfn;
1472
1473	/*
1474	 * Check whether offset isn't beyond end of file now. Caller is
1475	 * supposed to hold locks serializing us with truncate / punch hole so
1476	 * this is a reliable test.
1477	 */
1478	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1479
1480	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1481
1482	/*
1483	 * Make sure that the faulting address's PMD offset (color) matches
1484	 * the PMD offset from the start of the file.  This is necessary so
1485	 * that a PMD range in the page table overlaps exactly with a PMD
1486	 * range in the page cache.
1487	 */
1488	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1489	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1490		goto fallback;
1491
1492	/* Fall back to PTEs if we're going to COW */
1493	if (write && !(vma->vm_flags & VM_SHARED))
1494		goto fallback;
1495
1496	/* If the PMD would extend outside the VMA */
1497	if (pmd_addr < vma->vm_start)
1498		goto fallback;
1499	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1500		goto fallback;
1501
1502	if (xas.xa_index >= max_pgoff) {
1503		result = VM_FAULT_SIGBUS;
1504		goto out;
1505	}
1506
1507	/* If the PMD would extend beyond the file size */
1508	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1509		goto fallback;
1510
1511	/*
1512	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1513	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1514	 * entry is already in the array, for instance), it will return
1515	 * VM_FAULT_FALLBACK.
1516	 */
1517	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1518	if (xa_is_internal(entry)) {
1519		result = xa_to_internal(entry);
1520		goto fallback;
1521	}
1522
1523	/*
1524	 * It is possible, particularly with mixed reads & writes to private
1525	 * mappings, that we have raced with a PTE fault that overlaps with
1526	 * the PMD we need to set up.  If so just return and the fault will be
1527	 * retried.
1528	 */
1529	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1530			!pmd_devmap(*vmf->pmd)) {
1531		result = 0;
1532		goto unlock_entry;
1533	}
1534
1535	/*
1536	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1537	 * setting up a mapping, so really we're using iomap_begin() as a way
1538	 * to look up our filesystem block.
1539	 */
1540	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1541	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
1542			&srcmap);
1543	if (error)
1544		goto unlock_entry;
1545
1546	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1547		goto finish_iomap;
1548
1549	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1550
1551	switch (iomap.type) {
1552	case IOMAP_MAPPED:
1553		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1554		if (error < 0)
1555			goto finish_iomap;
1556
1557		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1558						DAX_PMD, write && !sync);
1559
1560		/*
1561		 * If we are doing a synchronous page fault and the inode needs fsync,
1562		 * we can insert the PMD into the page tables only after that happens.
1563		 * Skip insertion for now and return the pfn so that the caller can
1564		 * insert it after fsync is done.
1565		 */
1566		if (sync) {
1567			if (WARN_ON_ONCE(!pfnp))
1568				goto finish_iomap;
1569			*pfnp = pfn;
1570			result = VM_FAULT_NEEDDSYNC;
1571			goto finish_iomap;
1572		}
1573
1574		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1575		result = vmf_insert_pfn_pmd(vmf, pfn, write);
1576		break;
1577	case IOMAP_UNWRITTEN:
1578	case IOMAP_HOLE:
1579		if (WARN_ON_ONCE(write))
1580			break;
1581		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1582		break;
1583	default:
1584		WARN_ON_ONCE(1);
1585		break;
1586	}
1587
1588 finish_iomap:
1589	if (ops->iomap_end) {
1590		int copied = PMD_SIZE;
1591
1592		if (result == VM_FAULT_FALLBACK)
1593			copied = 0;
1594		/*
1595		 * The fault is done by now and there's no way back (other
1596		 * thread may be already happily using PMD we have installed).
1597		 * Just ignore error from ->iomap_end since we cannot do much
1598		 * with it.
1599		 */
1600		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1601				&iomap);
1602	}
1603 unlock_entry:
1604	dax_unlock_entry(&xas, entry);
1605 fallback:
1606	if (result == VM_FAULT_FALLBACK) {
1607		split_huge_pmd(vma, vmf->pmd, vmf->address);
1608		count_vm_event(THP_FAULT_FALLBACK);
1609	}
1610out:
1611	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1612	return result;
1613}
1614#else
1615static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1616			       const struct iomap_ops *ops)
1617{
1618	return VM_FAULT_FALLBACK;
1619}
1620#endif /* CONFIG_FS_DAX_PMD */
1621
1622/**
1623 * dax_iomap_fault - handle a page fault on a DAX file
1624 * @vmf: The description of the fault
1625 * @pe_size: Size of the page to fault in
1626 * @pfnp: PFN to insert for synchronous faults if fsync is required
1627 * @iomap_errp: Storage for detailed error code in case of error
1628 * @ops: Iomap ops passed from the file system
1629 *
1630 * When a page fault occurs, filesystems may call this helper in
1631 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1632 * has done all the necessary locking for page fault to proceed
1633 * successfully.
1634 */
1635vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1636		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1637{
1638	switch (pe_size) {
1639	case PE_SIZE_PTE:
1640		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1641	case PE_SIZE_PMD:
1642		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1643	default:
1644		return VM_FAULT_FALLBACK;
1645	}
1646}
1647EXPORT_SYMBOL_GPL(dax_iomap_fault);
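/*
 * Editor's sketch (not part of the original file): a filesystem ->huge_fault
 * handler wiring dax_iomap_fault() to dax_finish_sync_fault() (defined
 * below).  my_iomap_ops and the fs-private lock are hypothetical; ext4 and
 * XFS follow this pattern.
 */
static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	vm_fault_t ret;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	/* take the fs-private lock that serializes against truncate here */
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	/* drop the fs-private lock here */
	if (write)
		sb_end_pagefault(inode->i_sb);

	return ret;
}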
1648
1649/*
1650 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1651 * @vmf: The description of the fault
1652 * @pfn: PFN to insert
1653 * @order: Order of entry to insert.
1654 *
1655 * This function inserts a writeable PTE or PMD entry into the page tables
1656 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1657 */
1658static vm_fault_t
1659dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1660{
1661	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1662	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1663	void *entry;
1664	vm_fault_t ret;
1665
1666	xas_lock_irq(&xas);
1667	entry = get_unlocked_entry(&xas, order);
1668	/* Did we race with someone splitting entry or so? */
1669	if (!entry || dax_is_conflict(entry) ||
1670	    (order == 0 && !dax_is_pte_entry(entry))) {
1671		put_unlocked_entry(&xas, entry);
1672		xas_unlock_irq(&xas);
1673		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1674						      VM_FAULT_NOPAGE);
1675		return VM_FAULT_NOPAGE;
1676	}
1677	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1678	dax_lock_entry(&xas, entry);
1679	xas_unlock_irq(&xas);
1680	if (order == 0)
1681		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1682#ifdef CONFIG_FS_DAX_PMD
1683	else if (order == PMD_ORDER)
1684		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1685#endif
1686	else
1687		ret = VM_FAULT_FALLBACK;
1688	dax_unlock_entry(&xas, entry);
1689	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1690	return ret;
1691}
1692
1693/**
1694 * dax_finish_sync_fault - finish synchronous page fault
1695 * @vmf: The description of the fault
1696 * @pe_size: Size of entry to be inserted
1697 * @pfn: PFN to insert
1698 *
1699 * This function ensures that the file range touched by the page fault is
1700 * stored persistently on the media and inserts the appropriate page table
1701 * entry.
1702 */
1703vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1704		enum page_entry_size pe_size, pfn_t pfn)
1705{
1706	int err;
1707	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1708	unsigned int order = pe_order(pe_size);
1709	size_t len = PAGE_SIZE << order;
1710
1711	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1712	if (err)
1713		return VM_FAULT_SIGBUS;
1714	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1715}
1716EXPORT_SYMBOL_GPL(dax_finish_sync_fault);