   1/*
   2 * fs/dax.c - Direct Access filesystem code
   3 * Copyright (c) 2013-2014 Intel Corporation
   4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
   5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 */
  16
  17#include <linux/atomic.h>
  18#include <linux/blkdev.h>
  19#include <linux/buffer_head.h>
  20#include <linux/dax.h>
  21#include <linux/fs.h>
  22#include <linux/genhd.h>
  23#include <linux/highmem.h>
  24#include <linux/memcontrol.h>
  25#include <linux/mm.h>
  26#include <linux/mutex.h>
  27#include <linux/pagevec.h>
  28#include <linux/pmem.h>
  29#include <linux/sched.h>
  30#include <linux/uio.h>
  31#include <linux/vmstat.h>
  32#include <linux/pfn_t.h>
  33#include <linux/sizes.h>
  34
  35static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
  36{
  37	struct request_queue *q = bdev->bd_queue;
  38	long rc = -EIO;
  39
  40	dax->addr = (void __pmem *) ERR_PTR(-EIO);
  41	if (blk_queue_enter(q, true) != 0)
  42		return rc;
  43
  44	rc = bdev_direct_access(bdev, dax);
  45	if (rc < 0) {
  46		dax->addr = (void __pmem *) ERR_PTR(rc);
  47		blk_queue_exit(q);
  48		return rc;
  49	}
  50	return rc;
  51}
  52
  53static void dax_unmap_atomic(struct block_device *bdev,
  54		const struct blk_dax_ctl *dax)
  55{
  56	if (IS_ERR(dax->addr))
  57		return;
  58	blk_queue_exit(bdev->bd_queue);
  59}
  60
  61struct page *read_dax_sector(struct block_device *bdev, sector_t n)
  62{
  63	struct page *page = alloc_pages(GFP_KERNEL, 0);
  64	struct blk_dax_ctl dax = {
  65		.size = PAGE_SIZE,
  66		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
  67	};
  68	long rc;
  69
  70	if (!page)
  71		return ERR_PTR(-ENOMEM);
  72
  73	rc = dax_map_atomic(bdev, &dax);
  74	if (rc < 0)
  75		return ERR_PTR(rc);
  76	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
  77	dax_unmap_atomic(bdev, &dax);
  78	return page;
  79}
  80
  81/*
  82 * dax_clear_sectors() is called from within transaction context from XFS,
   83 * and hence the stack from this point must follow GFP_NOFS
  84 * semantics for all operations.
  85 */
  86int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
  87{
  88	struct blk_dax_ctl dax = {
  89		.sector = _sector,
  90		.size = _size,
  91	};
  92
  93	might_sleep();
  94	do {
  95		long count, sz;
  96
  97		count = dax_map_atomic(bdev, &dax);
  98		if (count < 0)
  99			return count;
 100		sz = min_t(long, count, SZ_128K);
 101		clear_pmem(dax.addr, sz);
 102		dax.size -= sz;
 103		dax.sector += sz / 512;
 104		dax_unmap_atomic(bdev, &dax);
 105		cond_resched();
 106	} while (dax.size);
 107
 108	wmb_pmem();
 109	return 0;
 110}
 111EXPORT_SYMBOL_GPL(dax_clear_sectors);
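dax_clear_sectors() takes a device-relative sector and a byte count, so a caller first converts file-relative block numbers. A minimal sketch of that conversion, assuming a hypothetical myfs_zero_extent() helper and the usual 512-byte sector size:

/*
 * Illustrative sketch only, not part of fs/dax.c: zero @nr_blocks freshly
 * allocated filesystem blocks starting at @block on a DAX-capable device.
 */
static int myfs_zero_extent(struct inode *inode, sector_t block, long nr_blocks)
{
	sector_t sector = block << (inode->i_blkbits - 9);	/* fs blocks -> 512B sectors */
	long size = nr_blocks << inode->i_blkbits;		/* length in bytes */

	return dax_clear_sectors(inode->i_sb->s_bdev, sector, size);
}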
 112
 113/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
 114static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
 115		loff_t pos, loff_t end)
 116{
 117	loff_t final = end - pos + first; /* The final byte of the buffer */
 118
 119	if (first > 0)
 120		clear_pmem(addr, first);
 121	if (final < size)
 122		clear_pmem(addr + final, size - final);
 123}
 124
 125static bool buffer_written(struct buffer_head *bh)
 126{
 127	return buffer_mapped(bh) && !buffer_unwritten(bh);
 128}
 129
 130/*
 131 * When ext4 encounters a hole, it returns without modifying the buffer_head
 132 * which means that we can't trust b_size.  To cope with this, we set b_state
 133 * to 0 before calling get_block and, if any bit is set, we know we can trust
 134 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 135 * and would save us time calling get_block repeatedly.
 136 */
 137static bool buffer_size_valid(struct buffer_head *bh)
 138{
 139	return bh->b_state != 0;
 140}
 141
 142
 143static sector_t to_sector(const struct buffer_head *bh,
 144		const struct inode *inode)
 145{
 146	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 147
 148	return sector;
 149}
 150
 151static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 152		      loff_t start, loff_t end, get_block_t get_block,
 153		      struct buffer_head *bh)
 154{
 155	loff_t pos = start, max = start, bh_max = start;
 156	bool hole = false, need_wmb = false;
 157	struct block_device *bdev = NULL;
 158	int rw = iov_iter_rw(iter), rc;
 159	long map_len = 0;
 160	struct blk_dax_ctl dax = {
 161		.addr = (void __pmem *) ERR_PTR(-EIO),
 162	};
 163
 164	if (rw == READ)
 165		end = min(end, i_size_read(inode));
 166
 167	while (pos < end) {
 168		size_t len;
 169		if (pos == max) {
 170			unsigned blkbits = inode->i_blkbits;
 171			long page = pos >> PAGE_SHIFT;
 172			sector_t block = page << (PAGE_SHIFT - blkbits);
 173			unsigned first = pos - (block << blkbits);
 174			long size;
 175
 176			if (pos == bh_max) {
 177				bh->b_size = PAGE_ALIGN(end - pos);
 178				bh->b_state = 0;
 179				rc = get_block(inode, block, bh, rw == WRITE);
 180				if (rc)
 181					break;
 182				if (!buffer_size_valid(bh))
 183					bh->b_size = 1 << blkbits;
 184				bh_max = pos - first + bh->b_size;
 185				bdev = bh->b_bdev;
 186			} else {
 187				unsigned done = bh->b_size -
 188						(bh_max - (pos - first));
 189				bh->b_blocknr += done >> blkbits;
 190				bh->b_size -= done;
 191			}
 192
 193			hole = rw == READ && !buffer_written(bh);
 194			if (hole) {
 195				size = bh->b_size - first;
 196			} else {
 197				dax_unmap_atomic(bdev, &dax);
 198				dax.sector = to_sector(bh, inode);
 199				dax.size = bh->b_size;
 200				map_len = dax_map_atomic(bdev, &dax);
 201				if (map_len < 0) {
 202					rc = map_len;
 203					break;
 204				}
 205				if (buffer_unwritten(bh) || buffer_new(bh)) {
 206					dax_new_buf(dax.addr, map_len, first,
 207							pos, end);
 208					need_wmb = true;
 209				}
 210				dax.addr += first;
 211				size = map_len - first;
 212			}
 213			max = min(pos + size, end);
 214		}
 215
 216		if (iov_iter_rw(iter) == WRITE) {
 217			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
 218			need_wmb = true;
 219		} else if (!hole)
 220			len = copy_to_iter((void __force *) dax.addr, max - pos,
 221					iter);
 222		else
 223			len = iov_iter_zero(max - pos, iter);
 224
 225		if (!len) {
 226			rc = -EFAULT;
 227			break;
 228		}
 229
 230		pos += len;
 231		if (!IS_ERR(dax.addr))
 232			dax.addr += len;
 233	}
 234
 235	if (need_wmb)
 236		wmb_pmem();
 237	dax_unmap_atomic(bdev, &dax);
 238
 239	return (pos == start) ? rc : pos - start;
 240}
 241
 242/**
 243 * dax_do_io - Perform I/O to a DAX file
 244 * @iocb: The control block for this I/O
 245 * @inode: The file which the I/O is directed at
 246 * @iter: The addresses to do I/O from or to
 247 * @pos: The file offset where the I/O starts
 248 * @get_block: The filesystem method used to translate file offsets to blocks
 249 * @end_io: A filesystem callback for I/O completion
 250 * @flags: See below
 251 *
 252 * This function uses the same locking scheme as do_blockdev_direct_IO:
 253 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 254 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 255 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 256 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 257 * is in progress.
 258 */
 259ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
 260		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
 261		  dio_iodone_t end_io, int flags)
 262{
 263	struct buffer_head bh;
 264	ssize_t retval = -EINVAL;
 265	loff_t end = pos + iov_iter_count(iter);
 266
 267	memset(&bh, 0, sizeof(bh));
 268	bh.b_bdev = inode->i_sb->s_bdev;
 269
 270	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
 271		struct address_space *mapping = inode->i_mapping;
 272		inode_lock(inode);
 273		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
 274		if (retval) {
 275			inode_unlock(inode);
 276			goto out;
 277		}
 278	}
 279
 280	/* Protects against truncate */
 281	if (!(flags & DIO_SKIP_DIO_COUNT))
 282		inode_dio_begin(inode);
 283
 284	retval = dax_io(inode, iter, pos, end, get_block, &bh);
 285
 286	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
 287		inode_unlock(inode);
 288
 289	if (end_io) {
 290		int err;
 291
 292		err = end_io(iocb, pos, retval, bh.b_private);
 293		if (err)
 294			retval = err;
 295	}
 296
 297	if (!(flags & DIO_SKIP_DIO_COUNT))
 298		inode_dio_end(inode);
 299 out:
 300	return retval;
 301}
 302EXPORT_SYMBOL_GPL(dax_do_io);
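A filesystem typically calls dax_do_io() from its ->direct_IO method when the inode is DAX-mapped. A hedged sketch of that dispatch, with myfs_get_block standing in for the filesystem's real get_block_t, no end_io callback, and the ->direct_IO/blockdev_direct_IO() signatures of this kernel version assumed:

/* Illustrative only: a ->direct_IO method dispatching to dax_do_io(). */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset, myfs_get_block,
				NULL, DIO_LOCKING);
	/* non-DAX inodes take the regular direct I/O path */
	return blockdev_direct_IO(iocb, inode, iter, offset, myfs_get_block);
}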
 303
 304/*
 305 * The user has performed a load from a hole in the file.  Allocating
 306 * a new page in the file would cause excessive storage usage for
 307 * workloads with sparse files.  We allocate a page cache page instead.
 308 * We'll kick it out of the page cache if it's ever written to,
 309 * otherwise it will simply fall out of the page cache under memory
 310 * pressure without ever having been dirtied.
 311 */
 312static int dax_load_hole(struct address_space *mapping, struct page *page,
 313							struct vm_fault *vmf)
 314{
 315	unsigned long size;
 316	struct inode *inode = mapping->host;
 317	if (!page)
 318		page = find_or_create_page(mapping, vmf->pgoff,
 319						GFP_KERNEL | __GFP_ZERO);
 320	if (!page)
 321		return VM_FAULT_OOM;
 322	/* Recheck i_size under page lock to avoid truncate race */
 323	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 324	if (vmf->pgoff >= size) {
 325		unlock_page(page);
 326		put_page(page);
 327		return VM_FAULT_SIGBUS;
 328	}
 329
 330	vmf->page = page;
 331	return VM_FAULT_LOCKED;
 332}
 333
 334static int copy_user_bh(struct page *to, struct inode *inode,
 335		struct buffer_head *bh, unsigned long vaddr)
 336{
 337	struct blk_dax_ctl dax = {
 338		.sector = to_sector(bh, inode),
 339		.size = bh->b_size,
 340	};
 341	struct block_device *bdev = bh->b_bdev;
 342	void *vto;
 343
 344	if (dax_map_atomic(bdev, &dax) < 0)
 345		return PTR_ERR(dax.addr);
 346	vto = kmap_atomic(to);
 347	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
 348	kunmap_atomic(vto);
 349	dax_unmap_atomic(bdev, &dax);
 350	return 0;
 351}
 352
 353#define NO_SECTOR -1
 354#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 355
 356static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
 357		sector_t sector, bool pmd_entry, bool dirty)
 358{
 359	struct radix_tree_root *page_tree = &mapping->page_tree;
 360	pgoff_t pmd_index = DAX_PMD_INDEX(index);
 361	int type, error = 0;
 362	void *entry;
 363
 364	WARN_ON_ONCE(pmd_entry && !dirty);
 365	if (dirty)
 366		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 367
 368	spin_lock_irq(&mapping->tree_lock);
 369
 370	entry = radix_tree_lookup(page_tree, pmd_index);
 371	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
 372		index = pmd_index;
 373		goto dirty;
 374	}
 375
 376	entry = radix_tree_lookup(page_tree, index);
 377	if (entry) {
 378		type = RADIX_DAX_TYPE(entry);
 379		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
 380					type != RADIX_DAX_PMD)) {
 381			error = -EIO;
 382			goto unlock;
 383		}
 384
 385		if (!pmd_entry || type == RADIX_DAX_PMD)
 386			goto dirty;
 387
 388		/*
 389		 * We only insert dirty PMD entries into the radix tree.  This
 390		 * means we don't need to worry about removing a dirty PTE
 391		 * entry and inserting a clean PMD entry, thus reducing the
 392		 * range we would flush with a follow-up fsync/msync call.
 393		 */
 394		radix_tree_delete(&mapping->page_tree, index);
 395		mapping->nrexceptional--;
 396	}
 397
 398	if (sector == NO_SECTOR) {
 399		/*
 400		 * This can happen during correct operation if our pfn_mkwrite
 401		 * fault raced against a hole punch operation.  If this
 402		 * happens the pte that was hole punched will have been
 403		 * unmapped and the radix tree entry will have been removed by
 404		 * the time we are called, but the call will still happen.  We
 405		 * will return all the way up to wp_pfn_shared(), where the
 406		 * pte_same() check will fail, eventually causing page fault
 407		 * to be retried by the CPU.
 408		 */
 409		goto unlock;
 410	}
 411
 412	error = radix_tree_insert(page_tree, index,
 413			RADIX_DAX_ENTRY(sector, pmd_entry));
 414	if (error)
 415		goto unlock;
 416
 417	mapping->nrexceptional++;
 418 dirty:
 419	if (dirty)
 420		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 421 unlock:
 422	spin_unlock_irq(&mapping->tree_lock);
 423	return error;
 424}
 425
 426static int dax_writeback_one(struct block_device *bdev,
 427		struct address_space *mapping, pgoff_t index, void *entry)
 428{
 429	struct radix_tree_root *page_tree = &mapping->page_tree;
 430	int type = RADIX_DAX_TYPE(entry);
 431	struct radix_tree_node *node;
 432	struct blk_dax_ctl dax;
 433	void **slot;
 434	int ret = 0;
 435
 436	spin_lock_irq(&mapping->tree_lock);
 437	/*
 438	 * Regular page slots are stabilized by the page lock even
 439	 * without the tree itself locked.  These unlocked entries
 440	 * need verification under the tree lock.
 441	 */
 442	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
 443		goto unlock;
 444	if (*slot != entry)
 445		goto unlock;
 446
 447	/* another fsync thread may have already written back this entry */
 448	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
 449		goto unlock;
 450
 451	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
 452		ret = -EIO;
 453		goto unlock;
 454	}
 455
 456	dax.sector = RADIX_DAX_SECTOR(entry);
 457	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
 458	spin_unlock_irq(&mapping->tree_lock);
 459
 460	/*
 461	 * We cannot hold tree_lock while calling dax_map_atomic() because it
 462	 * eventually calls cond_resched().
 463	 */
 464	ret = dax_map_atomic(bdev, &dax);
 465	if (ret < 0)
 466		return ret;
 467
 468	if (WARN_ON_ONCE(ret < dax.size)) {
 469		ret = -EIO;
 470		goto unmap;
 471	}
 472
 473	wb_cache_pmem(dax.addr, dax.size);
 474
 475	spin_lock_irq(&mapping->tree_lock);
 476	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
 477	spin_unlock_irq(&mapping->tree_lock);
 478 unmap:
 479	dax_unmap_atomic(bdev, &dax);
 480	return ret;
 481
 482 unlock:
 483	spin_unlock_irq(&mapping->tree_lock);
 484	return ret;
 485}
 486
 487/*
 488 * Flush the mapping to the persistent domain within the byte range of [start,
 489 * end]. This is required by data integrity operations to ensure file data is
 490 * on persistent storage prior to completion of the operation.
 491 */
 492int dax_writeback_mapping_range(struct address_space *mapping,
 493		struct block_device *bdev, struct writeback_control *wbc)
 494{
 495	struct inode *inode = mapping->host;
 496	pgoff_t start_index, end_index, pmd_index;
 497	pgoff_t indices[PAGEVEC_SIZE];
 498	struct pagevec pvec;
 499	bool done = false;
 500	int i, ret = 0;
 501	void *entry;
 502
 503	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 504		return -EIO;
 505
 506	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 507		return 0;
 508
 509	start_index = wbc->range_start >> PAGE_SHIFT;
 510	end_index = wbc->range_end >> PAGE_SHIFT;
 511	pmd_index = DAX_PMD_INDEX(start_index);
 512
 513	rcu_read_lock();
 514	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
 515	rcu_read_unlock();
 516
 517	/* see if the start of our range is covered by a PMD entry */
 518	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
 519		start_index = pmd_index;
 520
 521	tag_pages_for_writeback(mapping, start_index, end_index);
 522
 523	pagevec_init(&pvec, 0);
 524	while (!done) {
 525		pvec.nr = find_get_entries_tag(mapping, start_index,
 526				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
 527				pvec.pages, indices);
 528
 529		if (pvec.nr == 0)
 530			break;
 531
 532		for (i = 0; i < pvec.nr; i++) {
 533			if (indices[i] > end_index) {
 534				done = true;
 535				break;
 536			}
 537
 538			ret = dax_writeback_one(bdev, mapping, indices[i],
 539					pvec.pages[i]);
 540			if (ret < 0)
 541				return ret;
 542		}
 543	}
 544	wmb_pmem();
 545	return 0;
 546}
 547EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
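dax_writeback_mapping_range() is meant to be driven from a filesystem's ->writepages method so that fsync/msync flush dirty DAX radix tree entries to the persistent domain. A minimal sketch, with myfs_writepages as a hypothetical wrapper and generic_writepages() as the non-DAX fallback:

/* Illustrative only: write back dirty DAX entries from ->writepages. */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	if (IS_DAX(inode))
		return dax_writeback_mapping_range(mapping,
				inode->i_sb->s_bdev, wbc);
	return generic_writepages(mapping, wbc);
}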
 548
 549static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 550			struct vm_area_struct *vma, struct vm_fault *vmf)
 551{
 552	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 553	struct address_space *mapping = inode->i_mapping;
 554	struct block_device *bdev = bh->b_bdev;
 555	struct blk_dax_ctl dax = {
 556		.sector = to_sector(bh, inode),
 557		.size = bh->b_size,
 558	};
 559	pgoff_t size;
 560	int error;
 561
 562	i_mmap_lock_read(mapping);
 563
 564	/*
 565	 * Check truncate didn't happen while we were allocating a block.
 566	 * If it did, this block may or may not be still allocated to the
 567	 * file.  We can't tell the filesystem to free it because we can't
 568	 * take i_mutex here.  In the worst case, the file still has blocks
 569	 * allocated past the end of the file.
 570	 */
 571	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 572	if (unlikely(vmf->pgoff >= size)) {
 573		error = -EIO;
 574		goto out;
 575	}
 576
 577	if (dax_map_atomic(bdev, &dax) < 0) {
 578		error = PTR_ERR(dax.addr);
 579		goto out;
 580	}
 581
 582	if (buffer_unwritten(bh) || buffer_new(bh)) {
 583		clear_pmem(dax.addr, PAGE_SIZE);
 584		wmb_pmem();
 585	}
 586	dax_unmap_atomic(bdev, &dax);
 587
 588	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
 589			vmf->flags & FAULT_FLAG_WRITE);
 590	if (error)
 591		goto out;
 592
 593	error = vm_insert_mixed(vma, vaddr, dax.pfn);
 594
 595 out:
 596	i_mmap_unlock_read(mapping);
 597
 598	return error;
 599}
 600
 601/**
 602 * __dax_fault - handle a page fault on a DAX file
 603 * @vma: The virtual memory area where the fault occurred
 604 * @vmf: The description of the fault
 605 * @get_block: The filesystem method used to translate file offsets to blocks
 606 * @complete_unwritten: The filesystem method used to convert unwritten blocks
  607 *	to written so the data written to them is exposed. This is required
  608 *	by write faults for filesystems that will return unwritten extent
  609 *	mappings from @get_block, but it is optional for reads as
  610 *	dax_insert_mapping() will always zero unwritten blocks. If the fs
  611 *	does not support unwritten extents, then it should pass NULL.
 612 *
 613 * When a page fault occurs, filesystems may call this helper in their
 614 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 615 * the necessary locking for the page fault to proceed successfully.
 616 */
 617int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 618			get_block_t get_block, dax_iodone_t complete_unwritten)
 619{
 620	struct file *file = vma->vm_file;
 621	struct address_space *mapping = file->f_mapping;
 622	struct inode *inode = mapping->host;
 623	struct page *page;
 624	struct buffer_head bh;
 625	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 626	unsigned blkbits = inode->i_blkbits;
 627	sector_t block;
 628	pgoff_t size;
 629	int error;
 630	int major = 0;
 631
 632	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 633	if (vmf->pgoff >= size)
 634		return VM_FAULT_SIGBUS;
 635
 636	memset(&bh, 0, sizeof(bh));
 637	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
 638	bh.b_bdev = inode->i_sb->s_bdev;
 639	bh.b_size = PAGE_SIZE;
 640
 641 repeat:
 642	page = find_get_page(mapping, vmf->pgoff);
 643	if (page) {
 644		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 645			put_page(page);
 646			return VM_FAULT_RETRY;
 647		}
 648		if (unlikely(page->mapping != mapping)) {
 649			unlock_page(page);
 650			put_page(page);
 651			goto repeat;
 652		}
 653		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 654		if (unlikely(vmf->pgoff >= size)) {
 655			/*
 656			 * We have a struct page covering a hole in the file
 657			 * from a read fault and we've raced with a truncate
 658			 */
 659			error = -EIO;
 660			goto unlock_page;
 661		}
 662	}
 663
 664	error = get_block(inode, block, &bh, 0);
 665	if (!error && (bh.b_size < PAGE_SIZE))
 666		error = -EIO;		/* fs corruption? */
 667	if (error)
 668		goto unlock_page;
 669
 670	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 671		if (vmf->flags & FAULT_FLAG_WRITE) {
 672			error = get_block(inode, block, &bh, 1);
 673			count_vm_event(PGMAJFAULT);
 674			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 675			major = VM_FAULT_MAJOR;
 676			if (!error && (bh.b_size < PAGE_SIZE))
 677				error = -EIO;
 678			if (error)
 679				goto unlock_page;
 680		} else {
 681			return dax_load_hole(mapping, page, vmf);
 682		}
 683	}
 684
 685	if (vmf->cow_page) {
 686		struct page *new_page = vmf->cow_page;
 687		if (buffer_written(&bh))
 688			error = copy_user_bh(new_page, inode, &bh, vaddr);
 689		else
 690			clear_user_highpage(new_page, vaddr);
 691		if (error)
 692			goto unlock_page;
 693		vmf->page = page;
 694		if (!page) {
 695			i_mmap_lock_read(mapping);
 696			/* Check we didn't race with truncate */
 697			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 698								PAGE_SHIFT;
 699			if (vmf->pgoff >= size) {
 700				i_mmap_unlock_read(mapping);
 701				error = -EIO;
 702				goto out;
 703			}
 704		}
 705		return VM_FAULT_LOCKED;
 706	}
 707
 708	/* Check we didn't race with a read fault installing a new page */
 709	if (!page && major)
 710		page = find_lock_page(mapping, vmf->pgoff);
 711
 712	if (page) {
 713		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
 714							PAGE_SIZE, 0);
 715		delete_from_page_cache(page);
 716		unlock_page(page);
 717		put_page(page);
 718		page = NULL;
 719	}
 720
 721	/*
 722	 * If we successfully insert the new mapping over an unwritten extent,
 723	 * we need to ensure we convert the unwritten extent. If there is an
 724	 * error inserting the mapping, the filesystem needs to leave it as
 725	 * unwritten to prevent exposure of the stale underlying data to
 726	 * userspace, but we still need to call the completion function so
 727	 * the private resources on the mapping buffer can be released. We
 728	 * indicate what the callback should do via the uptodate variable, same
 729	 * as for normal BH based IO completions.
 730	 */
 731	error = dax_insert_mapping(inode, &bh, vma, vmf);
 732	if (buffer_unwritten(&bh)) {
 733		if (complete_unwritten)
 734			complete_unwritten(&bh, !error);
 735		else
 736			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 737	}
 738
 739 out:
 740	if (error == -ENOMEM)
 741		return VM_FAULT_OOM | major;
 742	/* -EBUSY is fine, somebody else faulted on the same PTE */
 743	if ((error < 0) && (error != -EBUSY))
 744		return VM_FAULT_SIGBUS | major;
 745	return VM_FAULT_NOPAGE | major;
 746
 747 unlock_page:
 748	if (page) {
 749		unlock_page(page);
 750		put_page(page);
 751	}
 752	goto out;
 753}
 754EXPORT_SYMBOL(__dax_fault);
 755
 756/**
 757 * dax_fault - handle a page fault on a DAX file
 758 * @vma: The virtual memory area where the fault occurred
 759 * @vmf: The description of the fault
 760 * @get_block: The filesystem method used to translate file offsets to blocks
 761 *
 762 * When a page fault occurs, filesystems may call this helper in their
 763 * fault handler for DAX files.
 764 */
 765int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 766	      get_block_t get_block, dax_iodone_t complete_unwritten)
 767{
 768	int result;
 769	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
 770
 771	if (vmf->flags & FAULT_FLAG_WRITE) {
 772		sb_start_pagefault(sb);
 773		file_update_time(vma->vm_file);
 774	}
 775	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
 776	if (vmf->flags & FAULT_FLAG_WRITE)
 777		sb_end_pagefault(sb);
 778
 779	return result;
 780}
 781EXPORT_SYMBOL_GPL(dax_fault);
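A filesystem's ->fault handler usually just forwards to dax_fault() with its own get_block_t and, optionally, an unwritten-extent conversion callback. A sketch with hypothetical myfs names; NULL is passed for complete_unwritten on the assumption that the filesystem never maps unwritten extents:

/* Illustrative only: PTE fault handler for a DAX-capable filesystem. */
static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, myfs_get_block, NULL);
}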
 782
 783#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 784/*
 785 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 786 * more often than one might expect in the below function.
 787 */
 788#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
 789
 790static void __dax_dbg(struct buffer_head *bh, unsigned long address,
 791		const char *reason, const char *fn)
 792{
 793	if (bh) {
 794		char bname[BDEVNAME_SIZE];
 795		bdevname(bh->b_bdev, bname);
 796		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
 797			"length %zd fallback: %s\n", fn, current->comm,
 798			address, bname, bh->b_state, (u64)bh->b_blocknr,
 799			bh->b_size, reason);
 800	} else {
 801		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
 802			current->comm, address, reason);
 803	}
 804}
 805
 806#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
 807
 808int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 809		pmd_t *pmd, unsigned int flags, get_block_t get_block,
 810		dax_iodone_t complete_unwritten)
 811{
 812	struct file *file = vma->vm_file;
 813	struct address_space *mapping = file->f_mapping;
 814	struct inode *inode = mapping->host;
 815	struct buffer_head bh;
 816	unsigned blkbits = inode->i_blkbits;
 817	unsigned long pmd_addr = address & PMD_MASK;
 818	bool write = flags & FAULT_FLAG_WRITE;
 819	struct block_device *bdev;
 820	pgoff_t size, pgoff;
 821	sector_t block;
 822	int error, result = 0;
 823	bool alloc = false;
 824
 825	/* dax pmd mappings require pfn_t_devmap() */
 826	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
 827		return VM_FAULT_FALLBACK;
 828
 829	/* Fall back to PTEs if we're going to COW */
 830	if (write && !(vma->vm_flags & VM_SHARED)) {
 831		split_huge_pmd(vma, pmd, address);
 832		dax_pmd_dbg(NULL, address, "cow write");
 833		return VM_FAULT_FALLBACK;
 834	}
 835	/* If the PMD would extend outside the VMA */
 836	if (pmd_addr < vma->vm_start) {
 837		dax_pmd_dbg(NULL, address, "vma start unaligned");
 838		return VM_FAULT_FALLBACK;
 839	}
 840	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
 841		dax_pmd_dbg(NULL, address, "vma end unaligned");
 842		return VM_FAULT_FALLBACK;
 843	}
 844
 845	pgoff = linear_page_index(vma, pmd_addr);
 846	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 847	if (pgoff >= size)
 848		return VM_FAULT_SIGBUS;
 849	/* If the PMD would cover blocks out of the file */
 850	if ((pgoff | PG_PMD_COLOUR) >= size) {
 851		dax_pmd_dbg(NULL, address,
 852				"offset + huge page size > file size");
 853		return VM_FAULT_FALLBACK;
 854	}
 855
 856	memset(&bh, 0, sizeof(bh));
 857	bh.b_bdev = inode->i_sb->s_bdev;
 858	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 859
 860	bh.b_size = PMD_SIZE;
 861
 862	if (get_block(inode, block, &bh, 0) != 0)
 863		return VM_FAULT_SIGBUS;
 864
 865	if (!buffer_mapped(&bh) && write) {
 866		if (get_block(inode, block, &bh, 1) != 0)
 867			return VM_FAULT_SIGBUS;
 868		alloc = true;
 869	}
 870
 871	bdev = bh.b_bdev;
 872
 873	/*
 874	 * If the filesystem isn't willing to tell us the length of a hole,
 875	 * just fall back to PTEs.  Calling get_block 512 times in a loop
 876	 * would be silly.
 877	 */
 878	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
 879		dax_pmd_dbg(&bh, address, "allocated block too small");
 880		return VM_FAULT_FALLBACK;
 881	}
 882
 883	/*
 884	 * If we allocated new storage, make sure no process has any
 885	 * zero pages covering this hole
 886	 */
 887	if (alloc) {
 888		loff_t lstart = pgoff << PAGE_SHIFT;
 889		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
 890
 891		truncate_pagecache_range(inode, lstart, lend);
 892	}
 893
 894	i_mmap_lock_read(mapping);
 895
 896	/*
 897	 * If a truncate happened while we were allocating blocks, we may
 898	 * leave blocks allocated to the file that are beyond EOF.  We can't
 899	 * take i_mutex here, so just leave them hanging; they'll be freed
 900	 * when the file is deleted.
 901	 */
 902	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 903	if (pgoff >= size) {
 904		result = VM_FAULT_SIGBUS;
 905		goto out;
 906	}
 907	if ((pgoff | PG_PMD_COLOUR) >= size) {
 908		dax_pmd_dbg(&bh, address,
 909				"offset + huge page size > file size");
 910		goto fallback;
 911	}
 912
 913	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
 914		spinlock_t *ptl;
 915		pmd_t entry;
 916		struct page *zero_page = get_huge_zero_page();
 917
 918		if (unlikely(!zero_page)) {
 919			dax_pmd_dbg(&bh, address, "no zero page");
 920			goto fallback;
 921		}
 922
 923		ptl = pmd_lock(vma->vm_mm, pmd);
 924		if (!pmd_none(*pmd)) {
 925			spin_unlock(ptl);
 926			dax_pmd_dbg(&bh, address, "pmd already present");
 927			goto fallback;
 928		}
 929
 930		dev_dbg(part_to_dev(bdev->bd_part),
 931				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
 932				__func__, current->comm, address,
 933				(unsigned long long) to_sector(&bh, inode));
 934
 935		entry = mk_pmd(zero_page, vma->vm_page_prot);
 936		entry = pmd_mkhuge(entry);
 937		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
 938		result = VM_FAULT_NOPAGE;
 939		spin_unlock(ptl);
 940	} else {
 941		struct blk_dax_ctl dax = {
 942			.sector = to_sector(&bh, inode),
 943			.size = PMD_SIZE,
 944		};
 945		long length = dax_map_atomic(bdev, &dax);
 946
 947		if (length < 0) {
 948			result = VM_FAULT_SIGBUS;
 949			goto out;
 950		}
 951		if (length < PMD_SIZE) {
 952			dax_pmd_dbg(&bh, address, "dax-length too small");
 953			dax_unmap_atomic(bdev, &dax);
 954			goto fallback;
 955		}
 956		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
 957			dax_pmd_dbg(&bh, address, "pfn unaligned");
 958			dax_unmap_atomic(bdev, &dax);
 959			goto fallback;
 960		}
 961
 962		if (!pfn_t_devmap(dax.pfn)) {
 963			dax_unmap_atomic(bdev, &dax);
 964			dax_pmd_dbg(&bh, address, "pfn not in memmap");
 965			goto fallback;
 966		}
 967
 968		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
 969			clear_pmem(dax.addr, PMD_SIZE);
 970			wmb_pmem();
 971			count_vm_event(PGMAJFAULT);
 972			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 973			result |= VM_FAULT_MAJOR;
 974		}
 975		dax_unmap_atomic(bdev, &dax);
 976
 977		/*
 978		 * For PTE faults we insert a radix tree entry for reads, and
 979		 * leave it clean.  Then on the first write we dirty the radix
 980		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
 981		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
 982		 * call into get_block() to translate the pgoff to a sector in
 983		 * order to be able to create a new radix tree entry.
 984		 *
 985		 * The PMD path doesn't have an equivalent to
 986		 * dax_pfn_mkwrite(), though, so for a read followed by a
 987		 * write we traverse all the way through __dax_pmd_fault()
 988		 * twice.  This means we can just skip inserting a radix tree
 989		 * entry completely on the initial read and just wait until
 990		 * the write to insert a dirty entry.
 991		 */
 992		if (write) {
 993			error = dax_radix_entry(mapping, pgoff, dax.sector,
 994					true, true);
 995			if (error) {
 996				dax_pmd_dbg(&bh, address,
 997						"PMD radix insertion failed");
 998				goto fallback;
 999			}
1000		}
1001
1002		dev_dbg(part_to_dev(bdev->bd_part),
1003				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
1004				__func__, current->comm, address,
1005				pfn_t_to_pfn(dax.pfn),
1006				(unsigned long long) dax.sector);
1007		result |= vmf_insert_pfn_pmd(vma, address, pmd,
1008				dax.pfn, write);
1009	}
1010
1011 out:
1012	i_mmap_unlock_read(mapping);
1013
1014	if (buffer_unwritten(&bh))
1015		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
1016
1017	return result;
1018
1019 fallback:
1020	count_vm_event(THP_FAULT_FALLBACK);
1021	result = VM_FAULT_FALLBACK;
1022	goto out;
1023}
1024EXPORT_SYMBOL_GPL(__dax_pmd_fault);
1025
1026/**
1027 * dax_pmd_fault - handle a PMD fault on a DAX file
1028 * @vma: The virtual memory area where the fault occurred
 1029 * @address: The faulting virtual address; @pmd and @flags describe the fault
1030 * @get_block: The filesystem method used to translate file offsets to blocks
1031 *
1032 * When a page fault occurs, filesystems may call this helper in their
1033 * pmd_fault handler for DAX files.
1034 */
1035int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1036			pmd_t *pmd, unsigned int flags, get_block_t get_block,
1037			dax_iodone_t complete_unwritten)
1038{
1039	int result;
1040	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
1041
1042	if (flags & FAULT_FLAG_WRITE) {
1043		sb_start_pagefault(sb);
1044		file_update_time(vma->vm_file);
1045	}
1046	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
1047				complete_unwritten);
1048	if (flags & FAULT_FLAG_WRITE)
1049		sb_end_pagefault(sb);
1050
1051	return result;
1052}
1053EXPORT_SYMBOL_GPL(dax_pmd_fault);
1054#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1055
1056/**
1057 * dax_pfn_mkwrite - handle first write to DAX page
1058 * @vma: The virtual memory area where the fault occurred
1059 * @vmf: The description of the fault
1060 */
1061int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1062{
1063	struct file *file = vma->vm_file;
1064	int error;
1065
1066	/*
1067	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
1068	 * RADIX_DAX_PTE entry already exists in the radix tree from a
1069	 * previous call to __dax_fault().  We just want to look up that PTE
1070	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
1071	 * saves us from having to make a call to get_block() here to look
1072	 * up the sector.
1073	 */
1074	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
1075			true);
1076
1077	if (error == -ENOMEM)
1078		return VM_FAULT_OOM;
1079	if (error)
1080		return VM_FAULT_SIGBUS;
1081	return VM_FAULT_NOPAGE;
1082}
1083EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
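Putting the handlers together, a DAX filesystem wires dax_fault(), dax_pmd_fault() and dax_pfn_mkwrite() into its vm_operations_struct from ->mmap. The sketch below is modelled on in-tree users but uses hypothetical myfs_dax_* wrappers (myfs_dax_pmd_fault is assumed to wrap dax_pmd_fault() with the filesystem's get_block):

/* Illustrative only: vm_operations for DAX mmaps. */
static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.pmd_fault	= myfs_dax_pmd_fault,	/* wraps dax_pmd_fault() */
	.page_mkwrite	= myfs_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);

	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}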
1084
1085/**
1086 * dax_zero_page_range - zero a range within a page of a DAX file
1087 * @inode: The file being truncated
1088 * @from: The file offset that is being truncated to
1089 * @length: The number of bytes to zero
1090 * @get_block: The filesystem method used to translate file offsets to blocks
1091 *
1092 * This function can be called by a filesystem when it is zeroing part of a
1093 * page in a DAX file.  This is intended for hole-punch operations.  If
1094 * you are truncating a file, the helper function dax_truncate_page() may be
1095 * more convenient.
1096 *
1097 * We work in terms of PAGE_SIZE here for commonality with
 1098 * block_truncate_page(), but we could go down to the block size if the filesystem
1099 * took care of disposing of the unnecessary blocks.  Even if the filesystem
1100 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1101 * since the file might be mmapped.
1102 */
1103int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
1104							get_block_t get_block)
1105{
1106	struct buffer_head bh;
1107	pgoff_t index = from >> PAGE_SHIFT;
1108	unsigned offset = from & (PAGE_SIZE-1);
1109	int err;
1110
1111	/* Block boundary? Nothing to do */
1112	if (!length)
1113		return 0;
1114	BUG_ON((offset + length) > PAGE_SIZE);
1115
1116	memset(&bh, 0, sizeof(bh));
1117	bh.b_bdev = inode->i_sb->s_bdev;
1118	bh.b_size = PAGE_SIZE;
1119	err = get_block(inode, index, &bh, 0);
1120	if (err < 0)
1121		return err;
1122	if (buffer_written(&bh)) {
1123		struct block_device *bdev = bh.b_bdev;
1124		struct blk_dax_ctl dax = {
1125			.sector = to_sector(&bh, inode),
1126			.size = PAGE_SIZE,
1127		};
1128
1129		if (dax_map_atomic(bdev, &dax) < 0)
1130			return PTR_ERR(dax.addr);
1131		clear_pmem(dax.addr + offset, length);
1132		wmb_pmem();
1133		dax_unmap_atomic(bdev, &dax);
1134	}
1135
1136	return 0;
1137}
1138EXPORT_SYMBOL_GPL(dax_zero_page_range);
1139
1140/**
1141 * dax_truncate_page - handle a partial page being truncated in a DAX file
1142 * @inode: The file being truncated
1143 * @from: The file offset that is being truncated to
1144 * @get_block: The filesystem method used to translate file offsets to blocks
1145 *
1146 * Similar to block_truncate_page(), this function can be called by a
1147 * filesystem when it is truncating a DAX file to handle the partial page.
1148 *
1149 * We work in terms of PAGE_SIZE here for commonality with
 1150 * block_truncate_page(), but we could go down to the block size if the filesystem
1151 * took care of disposing of the unnecessary blocks.  Even if the filesystem
1152 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
1153 * since the file might be mmapped.
1154 */
1155int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1156{
1157	unsigned length = PAGE_ALIGN(from) - from;
1158	return dax_zero_page_range(inode, from, length, get_block);
1159}
1160EXPORT_SYMBOL_GPL(dax_truncate_page);
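dax_truncate_page() is the DAX analogue of block_truncate_page() for the partial page left at the new EOF. A hedged sketch of a truncate path choosing between the two, again with a hypothetical myfs_get_block:

/* Illustrative only: zero the partial tail page when shrinking a file. */
static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, myfs_get_block);
	else
		error = block_truncate_page(inode->i_mapping, newsize,
				myfs_get_block);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}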