   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * dax: direct host memory access
   4 * Copyright (C) 2020 Red Hat, Inc.
   5 */
   6
   7#include "fuse_i.h"
   8
   9#include <linux/delay.h>
  10#include <linux/dax.h>
  11#include <linux/uio.h>
  12#include <linux/pagemap.h>
  13#include <linux/pfn_t.h>
  14#include <linux/iomap.h>
  15#include <linux/interval_tree.h>
  16
  17/*
  18 * Default memory range size.  A power of 2 so it agrees with common FUSE_INIT
  19 * map_alignment values 4KB and 64KB.
  20 */
  21#define FUSE_DAX_SHIFT	21
  22#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
  23#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)
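/* With FUSE_DAX_SHIFT == 21 each range covers 2 MiB, i.e. 512 pages with 4 KiB pages. */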
  24
  25/* Number of ranges reclaimer will try to free in one invocation */
  26#define FUSE_DAX_RECLAIM_CHUNK		(10)
  27
  28/*
  29 * DAX memory reclaim threshold as a percentage of total ranges. When the
  30 * number of free ranges drops below this threshold, reclaim can trigger.
  31 * Default is 20%.
  32 */
  33#define FUSE_DAX_RECLAIM_THRESHOLD	(20)
  34
  35/** Translation information for file offsets to DAX window offsets */
  36struct fuse_dax_mapping {
  37	/* Pointer to inode where this memory range is mapped */
  38	struct inode *inode;
  39
  40	/* Will connect in fcd->free_ranges to keep track of free memory */
  41	struct list_head list;
  42
  43	/* For interval tree in file/inode */
  44	struct interval_tree_node itn;
  45
  46	/* Will connect in fcd->busy_ranges to keep track of busy memory */
  47	struct list_head busy_list;
  48
  49	/** Position in DAX window */
  50	u64 window_offset;
  51
  52	/** Length of mapping, in bytes */
  53	loff_t length;
  54
  55	/* Is this mapping read-only or read-write */
  56	bool writable;
  57
  58	/* reference count when the mapping is used by dax iomap. */
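	/* Starts at 1; a value above 1 means the range is in use and must not be reclaimed. */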
  59	refcount_t refcnt;
  60};
  61
  62/* Per-inode dax map */
  63struct fuse_inode_dax {
  64	/* Semaphore to protect modifications to the dmap tree */
  65	struct rw_semaphore sem;
  66
  67	/* Sorted rb tree of struct fuse_dax_mapping elements */
  68	struct rb_root_cached tree;
  69	unsigned long nr;
  70};
  71
  72struct fuse_conn_dax {
  73	/* DAX device */
  74	struct dax_device *dev;
  75
  76	/* Lock protecting accesses to members of this structure */
  77	spinlock_t lock;
  78
  79	/* List of memory ranges which are busy */
  80	unsigned long nr_busy_ranges;
  81	struct list_head busy_ranges;
  82
  83	/* Worker to free up memory ranges */
  84	struct delayed_work free_work;
  85
  86	/* Wait queue for a dax range to become free */
  87	wait_queue_head_t range_waitq;
  88
  89	/* DAX Window Free Ranges */
  90	long nr_free_ranges;
  91	struct list_head free_ranges;
  92
  93	unsigned long nr_ranges;
  94};
  95
  96static inline struct fuse_dax_mapping *
  97node_to_dmap(struct interval_tree_node *node)
  98{
  99	if (!node)
 100		return NULL;
 101
 102	return container_of(node, struct fuse_dax_mapping, itn);
 103}
 104
 105static struct fuse_dax_mapping *
 106alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
 107
 108static void
 109__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
 110{
 111	unsigned long free_threshold;
 112
 113	/* If the number of free ranges is below the threshold, start reclaim */
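	/* e.g. with 1000 total ranges and the default 20% threshold, reclaim kicks in once fewer than 200 ranges are free */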
 114	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
 115			     1);
 116	if (fcd->nr_free_ranges < free_threshold)
 117		queue_delayed_work(system_long_wq, &fcd->free_work,
 118				   msecs_to_jiffies(delay_ms));
 119}
 120
 121static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
 122				  unsigned long delay_ms)
 123{
 124	spin_lock(&fcd->lock);
 125	__kick_dmap_free_worker(fcd, delay_ms);
 126	spin_unlock(&fcd->lock);
 127}
 128
 129static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
 130{
 131	struct fuse_dax_mapping *dmap;
 132
 133	spin_lock(&fcd->lock);
 134	dmap = list_first_entry_or_null(&fcd->free_ranges,
 135					struct fuse_dax_mapping, list);
 136	if (dmap) {
 137		list_del_init(&dmap->list);
 138		WARN_ON(fcd->nr_free_ranges <= 0);
 139		fcd->nr_free_ranges--;
 140	}
 141	__kick_dmap_free_worker(fcd, 0);
 142	spin_unlock(&fcd->lock);
 143
 144	return dmap;
 145}
 146
 147/* This assumes fcd->lock is held */
 148static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
 149				    struct fuse_dax_mapping *dmap)
 150{
 151	list_del_init(&dmap->busy_list);
 152	WARN_ON(fcd->nr_busy_ranges == 0);
 153	fcd->nr_busy_ranges--;
 154}
 155
 156static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
 157				  struct fuse_dax_mapping *dmap)
 158{
 159	spin_lock(&fcd->lock);
 160	__dmap_remove_busy_list(fcd, dmap);
 161	spin_unlock(&fcd->lock);
 162}
 163
 164/* This assumes fcd->lock is held */
 165static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
 166				struct fuse_dax_mapping *dmap)
 167{
 168	list_add_tail(&dmap->list, &fcd->free_ranges);
 169	fcd->nr_free_ranges++;
 170	wake_up(&fcd->range_waitq);
 171}
 172
 173static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
 174				struct fuse_dax_mapping *dmap)
 175{
 176	/* Return fuse_dax_mapping to free list */
 177	spin_lock(&fcd->lock);
 178	__dmap_add_to_free_pool(fcd, dmap);
 179	spin_unlock(&fcd->lock);
 180}
 181
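/*
 * Ask the FUSE server (e.g. virtiofsd) to map FUSE_DAX_SZ bytes of the file,
 * starting at the file offset derived from start_idx, into the DAX window at
 * dmap->window_offset. On an upgrade only the access mode changes; the range
 * is already tracked in the inode's interval tree and on the busy list.
 */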
 182static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
 183				  struct fuse_dax_mapping *dmap, bool writable,
 184				  bool upgrade)
 185{
 186	struct fuse_mount *fm = get_fuse_mount(inode);
 187	struct fuse_conn_dax *fcd = fm->fc->dax;
 188	struct fuse_inode *fi = get_fuse_inode(inode);
 189	struct fuse_setupmapping_in inarg;
 190	loff_t offset = start_idx << FUSE_DAX_SHIFT;
 191	FUSE_ARGS(args);
 192	ssize_t err;
 193
 194	WARN_ON(fcd->nr_free_ranges < 0);
 195
 196	/* Ask fuse daemon to setup mapping */
 197	memset(&inarg, 0, sizeof(inarg));
 198	inarg.foffset = offset;
 199	inarg.fh = -1;
 200	inarg.moffset = dmap->window_offset;
 201	inarg.len = FUSE_DAX_SZ;
 202	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
 203	if (writable)
 204		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
 205	args.opcode = FUSE_SETUPMAPPING;
 206	args.nodeid = fi->nodeid;
 207	args.in_numargs = 1;
 208	args.in_args[0].size = sizeof(inarg);
 209	args.in_args[0].value = &inarg;
 210	err = fuse_simple_request(fm, &args);
 211	if (err < 0)
 212		return err;
 213	dmap->writable = writable;
 214	if (!upgrade) {
 215		/*
 216		 * We don't take a reference on inode. inode is valid right now
 217		 * and when inode is going away, cleanup logic should first
 218		 * cleanup dmap entries.
 219		 */
 220		dmap->inode = inode;
 221		dmap->itn.start = dmap->itn.last = start_idx;
 222		/* Protected by fi->dax->sem */
 223		interval_tree_insert(&dmap->itn, &fi->dax->tree);
 224		fi->dax->nr++;
 225		spin_lock(&fcd->lock);
 226		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
 227		fcd->nr_busy_ranges++;
 228		spin_unlock(&fcd->lock);
 229	}
 230	return 0;
 231}
 232
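/*
 * Send a FUSE_REMOVEMAPPING request carrying an array of {moffset, len}
 * entries so the server tears down the corresponding DAX window mappings.
 */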
 233static int fuse_send_removemapping(struct inode *inode,
 234				   struct fuse_removemapping_in *inargp,
 235				   struct fuse_removemapping_one *remove_one)
 236{
 237	struct fuse_inode *fi = get_fuse_inode(inode);
 238	struct fuse_mount *fm = get_fuse_mount(inode);
 239	FUSE_ARGS(args);
 240
 241	args.opcode = FUSE_REMOVEMAPPING;
 242	args.nodeid = fi->nodeid;
 243	args.in_numargs = 2;
 244	args.in_args[0].size = sizeof(*inargp);
 245	args.in_args[0].value = inargp;
 246	args.in_args[1].size = inargp->count * sizeof(*remove_one);
 247	args.in_args[1].value = remove_one;
 248	return fuse_simple_request(fm, &args);
 249}
 250
 251static int dmap_removemapping_list(struct inode *inode, unsigned int num,
 252				   struct list_head *to_remove)
 253{
 254	struct fuse_removemapping_one *remove_one, *ptr;
 255	struct fuse_removemapping_in inarg;
 256	struct fuse_dax_mapping *dmap;
 257	int ret, i = 0, nr_alloc;
 258
 259	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
 260	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
 261	if (!remove_one)
 262		return -ENOMEM;
 263
 264	ptr = remove_one;
 265	list_for_each_entry(dmap, to_remove, list) {
 266		ptr->moffset = dmap->window_offset;
 267		ptr->len = dmap->length;
 268		ptr++;
 269		i++;
 270		num--;
 271		if (i >= nr_alloc || num == 0) {
 272			memset(&inarg, 0, sizeof(inarg));
 273			inarg.count = i;
 274			ret = fuse_send_removemapping(inode, &inarg,
 275						      remove_one);
 276			if (ret)
 277				goto out;
 278			ptr = remove_one;
 279			i = 0;
 280		}
 281	}
 282out:
 283	kfree(remove_one);
 284	return ret;
 285}
 286
 287/*
 288 * Cleanup dmap entry and add back to free list. This should be called with
 289 * fcd->lock held.
 290 */
 291static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
 292					    struct fuse_dax_mapping *dmap)
 293{
 294	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
 295		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
 296		 dmap->length);
 297	__dmap_remove_busy_list(fcd, dmap);
 298	dmap->inode = NULL;
 299	dmap->itn.start = dmap->itn.last = 0;
 300	__dmap_add_to_free_pool(fcd, dmap);
 301}
 302
 303/*
 304 * Free inode dmap entries whose range falls inside [start, end].
 305 * Does not take any locks. At this point of time it should only be
 306 * called from evict_inode() path where we know all dmap entries can be
 307 * reclaimed.
 308 */
 309static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
 310				     struct inode *inode,
 311				     loff_t start, loff_t end)
 312{
 313	struct fuse_inode *fi = get_fuse_inode(inode);
 314	struct fuse_dax_mapping *dmap, *n;
 315	int err, num = 0;
 316	LIST_HEAD(to_remove);
 317	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
 318	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
 319	struct interval_tree_node *node;
 320
 321	while (1) {
 322		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
 323						end_idx);
 324		if (!node)
 325			break;
 326		dmap = node_to_dmap(node);
 327		/* inode is going away. There should not be any users of dmap */
 328		WARN_ON(refcount_read(&dmap->refcnt) > 1);
 329		interval_tree_remove(&dmap->itn, &fi->dax->tree);
 330		num++;
 331		list_add(&dmap->list, &to_remove);
 332	}
 333
 334	/* Nothing to remove */
 335	if (list_empty(&to_remove))
 336		return;
 337
 338	WARN_ON(fi->dax->nr < num);
 339	fi->dax->nr -= num;
 340	err = dmap_removemapping_list(inode, num, &to_remove);
 341	if (err && err != -ENOTCONN) {
 342		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
 343			start, end);
 344	}
 345	spin_lock(&fcd->lock);
 346	list_for_each_entry_safe(dmap, n, &to_remove, list) {
 347		list_del_init(&dmap->list);
 348		dmap_reinit_add_to_free_pool(fcd, dmap);
 349	}
 350	spin_unlock(&fcd->lock);
 351}
 352
 353static int dmap_removemapping_one(struct inode *inode,
 354				  struct fuse_dax_mapping *dmap)
 355{
 356	struct fuse_removemapping_one forget_one;
 357	struct fuse_removemapping_in inarg;
 358
 359	memset(&inarg, 0, sizeof(inarg));
 360	inarg.count = 1;
 361	memset(&forget_one, 0, sizeof(forget_one));
 362	forget_one.moffset = dmap->window_offset;
 363	forget_one.len = dmap->length;
 364
 365	return fuse_send_removemapping(inode, &inarg, &forget_one);
 366}
 367
 368/*
 369 * It is called from evict_inode() and by that time the inode is going away.
 370 * So this function does not take any locks like fi->dax->sem for traversing
 371 * the fuse inode interval tree. If that lock is taken then the lock validator
 372 * complains of a deadlock situation w.r.t. the fs_reclaim lock.
 373 */
 374void fuse_dax_inode_cleanup(struct inode *inode)
 375{
 376	struct fuse_conn *fc = get_fuse_conn(inode);
 377	struct fuse_inode *fi = get_fuse_inode(inode);
 378
 379	/*
 380	 * fuse_evict_inode() has already called truncate_inode_pages_final()
 381	 * before we arrive here. So we should not have to worry about any
 382	 * pages/exception entries still associated with inode.
 383	 */
 384	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
 385	WARN_ON(fi->dax->nr);
 386}
 387
 388static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
 389{
 390	iomap->addr = IOMAP_NULL_ADDR;
 391	iomap->length = length;
 392	iomap->type = IOMAP_HOLE;
 393}
 394
 395static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
 396			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
 397			    unsigned int flags)
 398{
 399	loff_t offset, len;
 400	loff_t i_size = i_size_read(inode);
 401
 402	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
 403	len = min(length, dmap->length - offset);
 404
 405	/* If length is beyond end of file, truncate further */
 406	if (pos + len > i_size)
 407		len = i_size - pos;
 408
 409	if (len > 0) {
 410		iomap->addr = dmap->window_offset + offset;
 411		iomap->length = len;
 412		if (flags & IOMAP_FAULT)
 413			iomap->length = ALIGN(len, PAGE_SIZE);
 414		iomap->type = IOMAP_MAPPED;
 415		/*
 416	 * Increase refcnt so that reclaim code knows this dmap is in
 417	 * use. This assumes fi->dax->sem is held either shared or
 418	 * exclusive.
 419		 */
 420		refcount_inc(&dmap->refcnt);
 421
 422		/* iomap->private should be NULL */
 423		WARN_ON_ONCE(iomap->private);
 424		iomap->private = dmap;
 425	} else {
 426		/* Mapping beyond end of file is hole */
 427		fuse_fill_iomap_hole(iomap, length);
 428	}
 429}
 430
 431static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
 432				      loff_t length, unsigned int flags,
 433				      struct iomap *iomap)
 434{
 435	struct fuse_inode *fi = get_fuse_inode(inode);
 436	struct fuse_conn *fc = get_fuse_conn(inode);
 437	struct fuse_conn_dax *fcd = fc->dax;
 438	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
 439	int ret;
 440	bool writable = flags & IOMAP_WRITE;
 441	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
 442	struct interval_tree_node *node;
 443
 444	/*
 445	 * Can't do inline reclaim in fault path. We call
 446	 * dax_layout_busy_page() before we free a range. And
 447	 * fuse_wait_dax_page() drops mapping->invalidate_lock and reacquires it.
 448	 * In fault path we enter with mapping->invalidate_lock held and can't
 449	 * drop it. Also in fault path we hold mapping->invalidate_lock shared
 450	 * and not exclusive, so that creates further issues with
 451	 * fuse_wait_dax_page().  Hence return -EAGAIN and fuse_dax_fault()
 452	 * will wait for a memory range to become free and retry.
 453	 */
 454	if (flags & IOMAP_FAULT) {
 455		alloc_dmap = alloc_dax_mapping(fcd);
 456		if (!alloc_dmap)
 457			return -EAGAIN;
 458	} else {
 459		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
 460		if (IS_ERR(alloc_dmap))
 461			return PTR_ERR(alloc_dmap);
 462	}
 463
 464	/* If we are here, we should have memory allocated */
 465	if (WARN_ON(!alloc_dmap))
 466		return -EIO;
 467
 468	/*
 469	 * Take the write lock so that only one caller can try to set up the
 470	 * mapping and the others wait.
 471	 */
 472	down_write(&fi->dax->sem);
 473	/*
 474	 * We dropped the lock. Check again whether somebody else has
 475	 * already set up the mapping.
 476	 */
 477	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
 478	if (node) {
 479		dmap = node_to_dmap(node);
 480		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
 481		dmap_add_to_free_pool(fcd, alloc_dmap);
 482		up_write(&fi->dax->sem);
 483		return 0;
 484	}
 485
 486	/* Setup one mapping */
 487	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
 488				     writable, false);
 489	if (ret < 0) {
 490		dmap_add_to_free_pool(fcd, alloc_dmap);
 491		up_write(&fi->dax->sem);
 492		return ret;
 493	}
 494	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
 495	up_write(&fi->dax->sem);
 496	return 0;
 497}
 498
 499static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
 500				    loff_t length, unsigned int flags,
 501				    struct iomap *iomap)
 502{
 503	struct fuse_inode *fi = get_fuse_inode(inode);
 504	struct fuse_dax_mapping *dmap;
 505	int ret;
 506	unsigned long idx = pos >> FUSE_DAX_SHIFT;
 507	struct interval_tree_node *node;
 508
 509	/*
 510	 * Take exclusive lock so that only one caller can try to setup
 511	 * mapping and others wait.
 512	 */
 513	down_write(&fi->dax->sem);
 514	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
 515
 516	/* We are holding either inode lock or invalidate_lock, and that should
 517	 * ensure that dmap can't be truncated. We are holding a reference
 518	 * on dmap and that should make sure it can't be reclaimed. So dmap
 519	 * should still be there in tree despite the fact we dropped and
 520	 * re-acquired the fi->dax->sem lock.
 521	 */
 522	ret = -EIO;
 523	if (WARN_ON(!node))
 524		goto out_err;
 525
 526	dmap = node_to_dmap(node);
 527
 528	/* We took an extra reference on dmap to make sure it's not reclaimed.
 529	 * Now we hold fi->dax->sem lock and that reference is not needed
 530	 * anymore. Drop it.
 531	 */
 532	if (refcount_dec_and_test(&dmap->refcnt)) {
 533		/* refcount should not hit 0. This object only goes
 534		 * away when fuse connection goes away
 535		 */
 536		WARN_ON_ONCE(1);
 537	}
 538
 539	/* Maybe another thread already upgraded mapping while we were not
 540	 * holding lock.
 541	 */
 542	if (dmap->writable) {
 543		ret = 0;
 544		goto out_fill_iomap;
 545	}
 546
 547	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
 548				     true);
 549	if (ret < 0)
 550		goto out_err;
 551out_fill_iomap:
 552	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
 553out_err:
 554	up_write(&fi->dax->sem);
 555	return ret;
 556}
 557
 558/* This is just for DAX and the mapping is ephemeral, do not use it for other
 559 * purposes since there is no block device with a permanent mapping.
 560 */
 561static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
 562			    unsigned int flags, struct iomap *iomap,
 563			    struct iomap *srcmap)
 564{
 565	struct fuse_inode *fi = get_fuse_inode(inode);
 566	struct fuse_conn *fc = get_fuse_conn(inode);
 567	struct fuse_dax_mapping *dmap;
 568	bool writable = flags & IOMAP_WRITE;
 569	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
 570	struct interval_tree_node *node;
 571
 572	/* We don't support FIEMAP */
 573	if (WARN_ON(flags & IOMAP_REPORT))
 574		return -EIO;
 575
 576	iomap->offset = pos;
 577	iomap->flags = 0;
 578	iomap->bdev = NULL;
 579	iomap->dax_dev = fc->dax->dev;
 580
 581	/*
 582	 * Both the read/write and mmap paths can race here. So we need something
 583	 * to make sure that if we are setting up a mapping, the other path waits.
 584	 *
 585	 * For now, use a semaphore for this. It probably needs to be
 586	 * optimized later.
 587	 */
 588	down_read(&fi->dax->sem);
 589	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
 590	if (node) {
 591		dmap = node_to_dmap(node);
 592		if (writable && !dmap->writable) {
 593			/* Upgrade read-only mapping to read-write. This will
 594			 * require exclusive fi->dax->sem lock as we don't want
 595			 * two threads to be trying to do this simultaneously
 596			 * for the same dmap. So drop the shared lock and acquire
 597			 * the exclusive lock.
 598			 *
 599			 * Before dropping the fi->dax->sem lock, take a reference
 600			 * on dmap so that it's not freed by range reclaim.
 601			 */
 602			refcount_inc(&dmap->refcnt);
 603			up_read(&fi->dax->sem);
 604			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
 605				 __func__, pos, length);
 606			return fuse_upgrade_dax_mapping(inode, pos, length,
 607							flags, iomap);
 608		} else {
 609			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
 610			up_read(&fi->dax->sem);
 611			return 0;
 612		}
 613	} else {
 614		up_read(&fi->dax->sem);
 615		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
 616				__func__, pos, length);
 617		if (pos >= i_size_read(inode))
 618			goto iomap_hole;
 619
 620		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
 621						  iomap);
 622	}
 623
 624	/*
 625	 * If a read beyond end of file happens, filesystem code returns
 626	 * it as a hole.
 627	 */
 628iomap_hole:
 629	fuse_fill_iomap_hole(iomap, length);
 630	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
 631		 __func__, pos, length, iomap->length);
 632	return 0;
 633}
 634
 635static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 636			  ssize_t written, unsigned int flags,
 637			  struct iomap *iomap)
 638{
 639	struct fuse_dax_mapping *dmap = iomap->private;
 640
 641	if (dmap) {
 642		if (refcount_dec_and_test(&dmap->refcnt)) {
 643			/* refcount should not hit 0. This object only goes
 644			 * away when fuse connection goes away
 645			 */
 646			WARN_ON_ONCE(1);
 647		}
 648	}
 649
 650	/* DAX writes beyond end-of-file aren't handled using iomap, so the
 651	 * file size is unchanged and there is nothing to do here.
 652	 */
 653	return 0;
 654}
 655
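/* Handed to dax_iomap_rw() and dax_iomap_fault() in the read/write and fault paths below. */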
 656static const struct iomap_ops fuse_iomap_ops = {
 657	.iomap_begin = fuse_iomap_begin,
 658	.iomap_end = fuse_iomap_end,
 659};
 660
 661static void fuse_wait_dax_page(struct inode *inode)
 662{
 663	filemap_invalidate_unlock(inode->i_mapping);
 664	schedule();
 665	filemap_invalidate_lock(inode->i_mapping);
 666}
 667
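/*
 * Wait until no page in [start, end] has an elevated refcount (e.g. due to
 * get_user_pages()); fuse_wait_dax_page() drops and re-takes invalidate_lock
 * around each sleep.
 */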
 668/* Should be called with mapping->invalidate_lock held exclusively */
 669static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
 670				    loff_t start, loff_t end)
 671{
 672	struct page *page;
 673
 674	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
 675	if (!page)
 676		return 0;
 677
 678	*retry = true;
 679	return ___wait_var_event(&page->_refcount,
 680			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
 681			0, 0, fuse_wait_dax_page(inode));
 682}
 683
 684/* dmap_end == 0 leads to unmapping of whole file */
 685int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
 686				  u64 dmap_end)
 687{
 688	bool	retry;
 689	int	ret;
 690
 691	do {
 692		retry = false;
 693		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
 694					       dmap_end);
 695	} while (ret == 0 && retry);
 696
 697	return ret;
 698}
 699
 700ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 701{
 702	struct inode *inode = file_inode(iocb->ki_filp);
 703	ssize_t ret;
 704
 705	if (iocb->ki_flags & IOCB_NOWAIT) {
 706		if (!inode_trylock_shared(inode))
 707			return -EAGAIN;
 708	} else {
 709		inode_lock_shared(inode);
 710	}
 711
 712	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
 713	inode_unlock_shared(inode);
 714
 715	/* TODO file_accessed(iocb->f_filp) */
 716	return ret;
 717}
 718
 719static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
 720{
 721	struct inode *inode = file_inode(iocb->ki_filp);
 722
 723	return (iov_iter_rw(from) == WRITE &&
 724		((iocb->ki_pos) >= i_size_read(inode) ||
 725		  (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
 726}
 727
 728static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
 729{
 730	struct inode *inode = file_inode(iocb->ki_filp);
 731	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
 732	ssize_t ret;
 733
 734	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
 735
 736	fuse_write_update_attr(inode, iocb->ki_pos, ret);
 737	return ret;
 738}
 739
 740ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 741{
 742	struct inode *inode = file_inode(iocb->ki_filp);
 743	ssize_t ret;
 744
 745	if (iocb->ki_flags & IOCB_NOWAIT) {
 746		if (!inode_trylock(inode))
 747			return -EAGAIN;
 748	} else {
 749		inode_lock(inode);
 750	}
 751
 752	ret = generic_write_checks(iocb, from);
 753	if (ret <= 0)
 754		goto out;
 755
 756	ret = file_remove_privs(iocb->ki_filp);
 757	if (ret)
 758		goto out;
 759	/* TODO file_update_time() but we don't want metadata I/O */
 760
 761	/* Do not use dax for file-extending writes, as the write and the
 762	 * on-disk i_size increase would not be atomic otherwise.
 763	 */
 764	if (file_extending_write(iocb, from))
 765		ret = fuse_dax_direct_write(iocb, from);
 766	else
 767		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);
 768
 769out:
 770	inode_unlock(inode);
 771
 772	if (ret > 0)
 773		ret = generic_write_sync(iocb, ret);
 774	return ret;
 775}
 776
 777static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
 778		bool write)
 779{
 780	vm_fault_t ret;
 781	struct inode *inode = file_inode(vmf->vma->vm_file);
 782	struct super_block *sb = inode->i_sb;
 783	pfn_t pfn;
 784	int error = 0;
 785	struct fuse_conn *fc = get_fuse_conn(inode);
 786	struct fuse_conn_dax *fcd = fc->dax;
 787	bool retry = false;
 788
 789	if (write)
 790		sb_start_pagefault(sb);
 791retry:
 792	if (retry && !(fcd->nr_free_ranges > 0))
 793		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
 794
 795	/*
 796	 * We need to serialize against not only truncate but also against
 797	 * fuse dax memory range reclaim. While a range is being reclaimed,
 798	 * we do not want any read/write/mmap to make progress and try
 799	 * to populate page cache or access memory we are trying to free.
 800	 */
 801	filemap_invalidate_lock_shared(inode->i_mapping);
 802	ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
 803	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
 804		error = 0;
 805		retry = true;
 806		filemap_invalidate_unlock_shared(inode->i_mapping);
 807		goto retry;
 808	}
 809
 810	if (ret & VM_FAULT_NEEDDSYNC)
 811		ret = dax_finish_sync_fault(vmf, order, pfn);
 812	filemap_invalidate_unlock_shared(inode->i_mapping);
 813
 814	if (write)
 815		sb_end_pagefault(sb);
 816
 817	return ret;
 818}
 819
 820static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
 821{
 822	return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
 823}
 824
 825static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
 826{
 827	return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
 828}
 829
 830static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
 831{
 832	return __fuse_dax_fault(vmf, 0, true);
 833}
 834
 835static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
 836{
 837	return __fuse_dax_fault(vmf, 0, true);
 838}
 839
 840static const struct vm_operations_struct fuse_dax_vm_ops = {
 841	.fault		= fuse_dax_fault,
 842	.huge_fault	= fuse_dax_huge_fault,
 843	.page_mkwrite	= fuse_dax_page_mkwrite,
 844	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
 845};
 846
 847int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
 848{
 849	file_accessed(file);
 850	vma->vm_ops = &fuse_dax_vm_ops;
 851	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
 852	return 0;
 853}
 854
 855static int dmap_writeback_invalidate(struct inode *inode,
 856				     struct fuse_dax_mapping *dmap)
 857{
 858	int ret;
 859	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
 860	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);
 861
 862	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
 863	if (ret) {
 864		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
 865			 ret, start_pos, end_pos);
 866		return ret;
 867	}
 868
 869	ret = invalidate_inode_pages2_range(inode->i_mapping,
 870					    start_pos >> PAGE_SHIFT,
 871					    end_pos >> PAGE_SHIFT);
 872	if (ret)
 873		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
 874			 ret);
 875
 876	return ret;
 877}
 878
 879static int reclaim_one_dmap_locked(struct inode *inode,
 880				   struct fuse_dax_mapping *dmap)
 881{
 882	int ret;
 883	struct fuse_inode *fi = get_fuse_inode(inode);
 884
 885	/*
 886	 * igrab() was done to make sure inode won't go under us, and this
 887	 * further avoids the race with evict().
 888	 */
 889	ret = dmap_writeback_invalidate(inode, dmap);
 890	if (ret)
 891		return ret;
 892
 893	/* Remove dax mapping from inode interval tree now */
 894	interval_tree_remove(&dmap->itn, &fi->dax->tree);
 895	fi->dax->nr--;
 896
 897	/* It is possible that umount/shutdown has killed the fuse connection
 898	 * and worker thread is trying to reclaim memory in parallel.  Don't
 899	 * warn in that case.
 900	 */
 901	ret = dmap_removemapping_one(inode, dmap);
 902	if (ret && ret != -ENOTCONN) {
 903		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
 904			dmap->window_offset, dmap->length, ret);
 905	}
 906	return 0;
 907}
 908
 909/* Find the first mapped dmap of an inode that is not in use. Caller needs
 910 * to hold fi->dax->sem lock either shared or exclusive.
 911 */
 912static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
 913{
 914	struct fuse_inode *fi = get_fuse_inode(inode);
 915	struct fuse_dax_mapping *dmap;
 916	struct interval_tree_node *node;
 917
 918	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
 919	     node = interval_tree_iter_next(node, 0, -1)) {
 920		dmap = node_to_dmap(node);
 921		/* still in use. */
 922		if (refcount_read(&dmap->refcnt) > 1)
 923			continue;
 924
 925		return dmap;
 926	}
 927
 928	return NULL;
 929}
 930
 931/*
 932 * Find the first mapping in the tree, free it and return it. Do not add
 933 * it back to the free pool.
 934 */
 935static struct fuse_dax_mapping *
 936inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
 937			      bool *retry)
 938{
 939	struct fuse_inode *fi = get_fuse_inode(inode);
 940	struct fuse_dax_mapping *dmap;
 941	u64 dmap_start, dmap_end;
 942	unsigned long start_idx;
 943	int ret;
 944	struct interval_tree_node *node;
 945
 946	filemap_invalidate_lock(inode->i_mapping);
 947
 948	/* Lookup a dmap and corresponding file offset to reclaim. */
 949	down_read(&fi->dax->sem);
 950	dmap = inode_lookup_first_dmap(inode);
 951	if (dmap) {
 952		start_idx = dmap->itn.start;
 953		dmap_start = start_idx << FUSE_DAX_SHIFT;
 954		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
 955	}
 956	up_read(&fi->dax->sem);
 957
 958	if (!dmap)
 959		goto out_mmap_sem;
 960	/*
 961	 * Make sure there are no references to inode pages using
 962	 * get_user_pages()
 963	 */
 964	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
 965	if (ret) {
 966		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
 967			 ret);
 968		dmap = ERR_PTR(ret);
 969		goto out_mmap_sem;
 970	}
 971
 972	down_write(&fi->dax->sem);
 973	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
 974	/* Range already got reclaimed by somebody else */
 975	if (!node) {
 976		if (retry)
 977			*retry = true;
 978		goto out_write_dmap_sem;
 979	}
 980
 981	dmap = node_to_dmap(node);
 982	/* still in use. */
 983	if (refcount_read(&dmap->refcnt) > 1) {
 984		dmap = NULL;
 985		if (retry)
 986			*retry = true;
 987		goto out_write_dmap_sem;
 988	}
 989
 990	ret = reclaim_one_dmap_locked(inode, dmap);
 991	if (ret < 0) {
 992		dmap = ERR_PTR(ret);
 993		goto out_write_dmap_sem;
 994	}
 995
 996	/* Clean up dmap. Do not add back to free list */
 997	dmap_remove_busy_list(fcd, dmap);
 998	dmap->inode = NULL;
 999	dmap->itn.start = dmap->itn.last = 0;
1000
1001	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
1002		 __func__, inode, dmap->window_offset, dmap->length);
1003
1004out_write_dmap_sem:
1005	up_write(&fi->dax->sem);
1006out_mmap_sem:
1007	filemap_invalidate_unlock(inode->i_mapping);
1008	return dmap;
1009}
1010
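/*
 * Allocate a free range, falling back to inline reclaim of an idle range of
 * this inode, and finally to waiting for the free worker to release one
 * (only when this inode has no ranges assigned, to avoid a deadlock).
 */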
1011static struct fuse_dax_mapping *
1012alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
1013{
1014	struct fuse_dax_mapping *dmap;
1015	struct fuse_inode *fi = get_fuse_inode(inode);
1016
1017	while (1) {
1018		bool retry = false;
1019
1020		dmap = alloc_dax_mapping(fcd);
1021		if (dmap)
1022			return dmap;
1023
1024		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1025		/*
1026		 * Either we got a mapping or it is an error; return in both
1027		 * cases.
1028		 */
1029		if (dmap)
1030			return dmap;
1031
1032		/* If we could not reclaim a mapping because it
1033		 * had a reference or some other temporary failure,
1034		 * try again. We want to give up inline reclaim only
1035		 * if there is no range assigned to this node. Otherwise
1036		 * a deadlock is possible if we sleep with
1037		 * mapping->invalidate_lock held and the worker freeing memory
1038		 * can't make progress due to unavailability of
1039		 * mapping->invalidate_lock. So sleep only if fi->dax->nr == 0.
1040		 */
1041		if (retry)
1042			continue;
1043		/*
1044		 * There are no mappings which can be reclaimed. Wait for one.
1045		 * We are not holding fi->dax->sem. So it is possible
1046		 * that a range gets added now. But as we are not holding
1047		 * mapping->invalidate_lock, the worker should still be able to
1048		 * free up a range and wake us up.
1049		 */
1050		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1051			if (wait_event_killable_exclusive(fcd->range_waitq,
1052					(fcd->nr_free_ranges > 0))) {
1053				return ERR_PTR(-EINTR);
1054			}
1055		}
1056	}
1057}
1058
1059static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
1060					  struct inode *inode,
1061					  unsigned long start_idx)
1062{
1063	int ret;
1064	struct fuse_inode *fi = get_fuse_inode(inode);
1065	struct fuse_dax_mapping *dmap;
1066	struct interval_tree_node *node;
1067
1068	/* Find the fuse dax mapping for this range index in the inode. */
1069	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
1070
1071	/* Range already got cleaned up by somebody else */
1072	if (!node)
1073		return 0;
1074	dmap = node_to_dmap(node);
1075
1076	/* still in use. */
1077	if (refcount_read(&dmap->refcnt) > 1)
1078		return 0;
1079
1080	ret = reclaim_one_dmap_locked(inode, dmap);
1081	if (ret < 0)
1082		return ret;
1083
1084	/* Cleanup dmap entry and add back to free list */
1085	spin_lock(&fcd->lock);
1086	dmap_reinit_add_to_free_pool(fcd, dmap);
1087	spin_unlock(&fcd->lock);
1088	return ret;
1089}
1090
1091/*
1092 * Free a range of memory.
1093 * Locking:
1094 * 1. Take mapping->invalidate_lock to block dax faults.
1095 * 2. Take fi->dax->sem to protect interval tree and also to make sure
1096 *    read/write can not reuse a dmap which we might be freeing.
1097 */
1098static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
1099				   struct inode *inode,
1100				   unsigned long start_idx,
1101				   unsigned long end_idx)
1102{
1103	int ret;
1104	struct fuse_inode *fi = get_fuse_inode(inode);
1105	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
1106	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
1107
1108	filemap_invalidate_lock(inode->i_mapping);
1109	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
1110	if (ret) {
1111		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
1112			 ret);
1113		goto out_mmap_sem;
1114	}
1115
1116	down_write(&fi->dax->sem);
1117	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
1118	up_write(&fi->dax->sem);
1119out_mmap_sem:
1120	filemap_invalidate_unlock(inode->i_mapping);
1121	return ret;
1122}
1123
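/*
 * Called from the free worker: scan fcd->busy_ranges and reclaim up to
 * nr_to_free ranges that are not currently referenced.
 */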
1124static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
1125				   unsigned long nr_to_free)
1126{
1127	struct fuse_dax_mapping *dmap, *pos, *temp;
1128	int ret, nr_freed = 0;
1129	unsigned long start_idx = 0, end_idx = 0;
1130	struct inode *inode = NULL;
1131
1132	/* Pick the first busy range and free it for now */
1133	while (1) {
1134		if (nr_freed >= nr_to_free)
1135			break;
1136
1137		dmap = NULL;
1138		spin_lock(&fcd->lock);
1139
1140		if (!fcd->nr_busy_ranges) {
1141			spin_unlock(&fcd->lock);
1142			return 0;
1143		}
1144
1145		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
1146						busy_list) {
1147			/* skip this range if it's in use. */
1148			if (refcount_read(&pos->refcnt) > 1)
1149				continue;
1150
1151			inode = igrab(pos->inode);
1152			/*
1153			 * This inode is going away. That will free
1154			 * up all the ranges anyway, continue to
1155			 * next range.
1156			 */
1157			if (!inode)
1158				continue;
1159			/*
1160			 * Take this element off the list and add it to the tail.
1161			 * If this element can't be freed, that will help with
1162			 * selecting a new element in the next iteration of the loop.
1163			 */
1164			dmap = pos;
1165			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1166			start_idx = end_idx = dmap->itn.start;
1167			break;
1168		}
1169		spin_unlock(&fcd->lock);
1170		if (!dmap)
1171			return 0;
1172
1173		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
1174		iput(inode);
1175		if (ret)
1176			return ret;
1177		nr_freed++;
1178	}
1179	return 0;
1180}
1181
1182static void fuse_dax_free_mem_worker(struct work_struct *work)
1183{
1184	int ret;
1185	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
1186						 free_work.work);
1187	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
1188	if (ret) {
1189		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
1190			 ret);
1191	}
1192
1193	/* If the number of free ranges is still below the threshold, requeue */
1194	kick_dmap_free_worker(fcd, 1);
1195}
1196
1197static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
1198{
1199	struct fuse_dax_mapping *range, *temp;
1200
1201	/* Free all allocated elements */
1202	list_for_each_entry_safe(range, temp, mem_list, list) {
1203		list_del(&range->list);
1204		if (!list_empty(&range->busy_list))
1205			list_del(&range->busy_list);
1206		kfree(range);
1207	}
1208}
1209
1210void fuse_dax_conn_free(struct fuse_conn *fc)
1211{
1212	if (fc->dax) {
1213		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
1214		kfree(fc->dax);
1215		fc->dax = NULL;
1216	}
1217}
1218
1219static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
1220{
1221	long nr_pages, nr_ranges;
1222	struct fuse_dax_mapping *range;
1223	int ret, id;
1224	size_t dax_size = -1;
1225	unsigned long i;
1226
1227	init_waitqueue_head(&fcd->range_waitq);
1228	INIT_LIST_HEAD(&fcd->free_ranges);
1229	INIT_LIST_HEAD(&fcd->busy_ranges);
1230	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
1231
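	/*
	 * dax_size is -1, i.e. as large as possible, so dax_direct_access()
	 * reports how many pages the DAX window actually provides at offset 0.
	 */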
1232	id = dax_read_lock();
1233	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
1234			DAX_ACCESS, NULL, NULL);
1235	dax_read_unlock(id);
1236	if (nr_pages < 0) {
1237		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
1238		return nr_pages;
1239	}
1240
1241	nr_ranges = nr_pages/FUSE_DAX_PAGES;
1242	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
1243		__func__, nr_pages, nr_ranges);
1244
1245	for (i = 0; i < nr_ranges; i++) {
1246		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
1247		ret = -ENOMEM;
1248		if (!range)
1249			goto out_err;
1250
1251		/* TODO: This offset only works if the virtio-fs driver is not
1252		 * hiding some memory at the beginning. This needs
1253		 * better handling.
1254		 */
1255		range->window_offset = i * FUSE_DAX_SZ;
1256		range->length = FUSE_DAX_SZ;
1257		INIT_LIST_HEAD(&range->busy_list);
1258		refcount_set(&range->refcnt, 1);
1259		list_add_tail(&range->list, &fcd->free_ranges);
1260	}
1261
1262	fcd->nr_free_ranges = nr_ranges;
1263	fcd->nr_ranges = nr_ranges;
1264	return 0;
1265out_err:
1266	/* Free all allocated elements */
1267	fuse_free_dax_mem_ranges(&fcd->free_ranges);
1268	return ret;
1269}
1270
1271int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
1272			struct dax_device *dax_dev)
1273{
1274	struct fuse_conn_dax *fcd;
1275	int err;
1276
1277	fc->dax_mode = dax_mode;
1278
1279	if (!dax_dev)
1280		return 0;
1281
1282	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
1283	if (!fcd)
1284		return -ENOMEM;
1285
1286	spin_lock_init(&fcd->lock);
1287	fcd->dev = dax_dev;
1288	err = fuse_dax_mem_range_init(fcd);
1289	if (err) {
1290		kfree(fcd);
1291		return err;
1292	}
1293
1294	fc->dax = fcd;
1295	return 0;
1296}
1297
1298bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
1299{
1300	struct fuse_conn *fc = get_fuse_conn_super(sb);
1301
1302	fi->dax = NULL;
1303	if (fc->dax) {
1304		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
1305		if (!fi->dax)
1306			return false;
1307
1308		init_rwsem(&fi->dax->sem);
1309		fi->dax->tree = RB_ROOT_CACHED;
1310	}
1311
1312	return true;
1313}
1314
1315static const struct address_space_operations fuse_dax_file_aops  = {
1316	.direct_IO	= noop_direct_IO,
1317	.dirty_folio	= noop_dirty_folio,
1318};
1319
1320static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
1321{
1322	struct fuse_conn *fc = get_fuse_conn(inode);
1323	enum fuse_dax_mode dax_mode = fc->dax_mode;
1324
1325	if (dax_mode == FUSE_DAX_NEVER)
1326		return false;
1327
1328	/*
1329	 * fc->dax may be NULL in 'inode' mode when the filesystem device doesn't
1330	 * support DAX, in which case it will silently fall back to 'never' mode.
1331	 */
1332	if (!fc->dax)
1333		return false;
1334
1335	if (dax_mode == FUSE_DAX_ALWAYS)
1336		return true;
1337
1338	/* dax_mode is FUSE_DAX_INODE* */
1339	return fc->inode_dax && (flags & FUSE_ATTR_DAX);
1340}
1341
1342void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
1343{
1344	if (!fuse_should_enable_dax(inode, flags))
1345		return;
1346
1347	inode->i_flags |= S_DAX;
1348	inode->i_data.a_ops = &fuse_dax_file_aops;
1349}
1350
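/*
 * In per-inode DAX mode, if the server's FUSE_ATTR_DAX hint no longer matches
 * the inode's current S_DAX state, mark the inode dontcache so that it gets
 * evicted and re-instantiated with the new mode on the next lookup.
 */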
1351void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
1352{
1353	struct fuse_conn *fc = get_fuse_conn(inode);
1354
1355	if (fuse_is_inode_dax_mode(fc->dax_mode) &&
1356	    ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
1357		d_mark_dontcache(inode);
1358}
1359
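/*
 * map_alignment from FUSE_INIT is the log2 of the alignment the server
 * requires; e.g. 12 (4 KiB) and 16 (64 KiB) are fine, while anything above
 * FUSE_DAX_SHIFT (21, i.e. 2 MiB) cannot be honoured by these fixed-size
 * ranges.
 */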
1360bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
1361{
1362	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
1363		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
1364			map_alignment, FUSE_DAX_SZ);
1365		return false;
1366	}
1367	return true;
1368}
1369
1370void fuse_dax_cancel_work(struct fuse_conn *fc)
1371{
1372	struct fuse_conn_dax *fcd = fc->dax;
1373
1374	if (fcd)
1375		cancel_delayed_work_sync(&fcd->free_work);
1376
1377}
1378EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);