v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node it collided
 * with in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
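
/*
 * Usage sketch (illustrative, not part of the kernel tree): tree_insert()
 * doubles as an existence check, so an "insert unless something already
 * covers this offset" caller looks like this, with @entry being a fully
 * initialized ordered extent:
 *
 *	struct rb_node *exist;
 *
 *	exist = tree_insert(&inode->ordered_tree, entry->file_offset,
 *			    &entry->rb_node);
 *	if (exist)
 *		return -EEXIST;	(an entry already covers this offset)
 *
 * insert_ordered_extent() below uses exactly this pattern and treats a
 * collision as fatal tree corruption.
 */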

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
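
/*
 * Worked example (illustrative): for an entry covering [4096, 12288), i.e.
 * file_offset = 4096 and num_bytes = 8192:
 *
 *	range_overlaps(entry, 0, 4096)     -> 0  (ends right at the entry)
 *	range_overlaps(entry, 12288, 4096) -> 0  (starts right after it)
 *	range_overlaps(entry, 8192, 16384) -> 1  (covers the entry's tail)
 *
 * Both ranges are half-open, so merely touching endpoints never counts as
 * an overlap.
 */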

/*
 * Look up the first ordered struct that contains this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}
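
/*
 * Illustrative note: the qgroup handling above is driven purely by the type
 * bits in @flags. A NOCOW or PREALLOC write, e.g.
 *
 *	flags = 1UL << BTRFS_ORDERED_NOCOW;
 *
 * frees its data qgroup reservation immediately since no new data extent
 * will be committed, while a regular COW write (BTRFS_ORDERED_REGULAR) only
 * releases the reservation so the per-transaction qgroup accounting can
 * consume it when the new extent is committed.
 */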

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}
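
/*
 * Usage sketch (illustrative, not taken from a real call site): a simplified
 * COW write path covering one 64K extent. The disk_bytenr value and all
 * error handling are elided or hypothetical.
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, file_offset, SZ_64K,
 *					     SZ_64K, disk_bytenr, SZ_64K, 0,
 *					     1U << BTRFS_ORDERED_REGULAR,
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	(... submit the data bios for the range ...)
 *	btrfs_put_ordered_extent(ordered);	(drops the caller's reference)
 *
 * The bio completion path then calls btrfs_finish_ordered_extent() for each
 * completed sub-range, which queues btrfs_finish_ordered_io() once all bytes
 * of the ordered extent have been written.
 */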

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * The Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to the next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * finish_ordered_fn to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}
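
/*
 * Worked example (illustrative): a 16K ordered extent completed by four 4K
 * writebacks decrements bytes_left as
 *
 *	16384 -> 12288 -> 8192 -> 4096 -> 0
 *
 * The first three calls return false; only the call that drives bytes_left
 * to zero sets BTRFS_ORDERED_IO_DONE, takes the extra reference and returns
 * true, so the completion work is queued exactly once.
 */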

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark the IO of all ordered extents inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}
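
/*
 * Worked example (illustrative): finishing [0, 16K) while the tree holds two
 * ordered extents, OE1 = [0, 4K) and OE2 = [8K, 12K):
 *
 *	cur = 0  : inside OE1, len = 4K, OE1 accounting is updated
 *	cur = 4K : the search lands back on OE1, rb_next() finds OE2 and
 *		   cur jumps to 8K
 *	cur = 8K : inside OE2, len = 4K, OE2 accounting is updated
 *	cur = 12K: OE2 is exhausted and there is no next node, loop exits
 */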

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
		       entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and report the
		 * extent as finished.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * Used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
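
/*
 * Reference rules in practice (illustrative): every lookup helper in this
 * file returns the entry with an extra reference taken, so the canonical
 * caller pattern is:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		(... use ordered ...)
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * The tree itself holds one more reference, taken in insert_ordered_extent()
 * and dropped by the completion path after btrfs_remove_ordered_extent().
 */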

/*
 * Remove an ordered extent from the tree.  No references are dropped, but
 * any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
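
/*
 * Usage sketch (illustrative): waiting on everything in a root, regardless
 * of where it sits on disk, passes a no-op limit and the widest range:
 *
 *	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 *
 * Note that the range filter applies to disk_bytenr, not file offsets,
 * which is what lets balance flush only the ordered extents that overlap
 * the block group being relocated.
 */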

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
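
/*
 * Usage sketch (illustrative): fsync-style callers that want "flush and wait
 * for the whole file" pass the maximal length and rely on the clamping to
 * OFFSET_MAX above:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 */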

/*
 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use ordered_tree_search(), which would use the
	 * cached inode->ordered_tree_last and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no pending ordered extents were
 * found in it, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}
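
/*
 * Usage sketch (illustrative): a NOWAIT IO path probes the range first and
 * falls back to the blocking variant otherwise. The nowait flag and the
 * fallback policy below are hypothetical stand-ins for what such callers do:
 *
 *	if (nowait) {
 *		if (!btrfs_try_lock_ordered_range(inode, start, end, &cached))
 *			return -EAGAIN;
 *	} else {
 *		btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	}
 */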

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}
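
/*
 * Worked example (illustrative): splitting a not-yet-started 1M ordered
 * extent that begins at file offset 0 and disk bytenr D with len = 256K:
 *
 *	new:     file_offset = 0,    disk_bytenr = D,        num_bytes = 256K
 *	ordered: file_offset = 256K, disk_bytenr = D + 256K, num_bytes = 768K
 *
 * bytes_left is split the same way, and any btrfs_ordered_sum entries that
 * cover the first 256K are moved over to @new before both nodes are
 * re-inserted into the tree.
 */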

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/slab.h>
   7#include <linux/blkdev.h>
   8#include <linux/writeback.h>
   9#include <linux/pagevec.h>
 
 
  10#include "ctree.h"
  11#include "transaction.h"
  12#include "btrfs_inode.h"
  13#include "extent_io.h"
  14#include "disk-io.h"
  15#include "compression.h"
 
 
 
 
 
  16
  17static struct kmem_cache *btrfs_ordered_extent_cache;
  18
  19static u64 entry_end(struct btrfs_ordered_extent *entry)
  20{
  21	if (entry->file_offset + entry->len < entry->file_offset)
  22		return (u64)-1;
  23	return entry->file_offset + entry->len;
  24}
  25
  26/* returns NULL if the insertion worked, or it returns the node it did find
  27 * in the tree
  28 */
  29static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  30				   struct rb_node *node)
  31{
  32	struct rb_node **p = &root->rb_node;
  33	struct rb_node *parent = NULL;
  34	struct btrfs_ordered_extent *entry;
  35
  36	while (*p) {
  37		parent = *p;
  38		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  39
  40		if (file_offset < entry->file_offset)
  41			p = &(*p)->rb_left;
  42		else if (file_offset >= entry_end(entry))
  43			p = &(*p)->rb_right;
  44		else
  45			return parent;
  46	}
  47
  48	rb_link_node(node, parent, p);
  49	rb_insert_color(node, root);
  50	return NULL;
  51}
  52
  53static void ordered_data_tree_panic(struct inode *inode, int errno,
  54					       u64 offset)
  55{
  56	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  57	btrfs_panic(fs_info, errno,
  58		    "Inconsistency in ordered tree at offset %llu", offset);
  59}
  60
  61/*
  62 * look for a given offset in the tree, and if it can't be found return the
  63 * first lesser offset
  64 */
  65static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  66				     struct rb_node **prev_ret)
  67{
  68	struct rb_node *n = root->rb_node;
  69	struct rb_node *prev = NULL;
  70	struct rb_node *test;
  71	struct btrfs_ordered_extent *entry;
  72	struct btrfs_ordered_extent *prev_entry = NULL;
  73
  74	while (n) {
  75		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  76		prev = n;
  77		prev_entry = entry;
  78
  79		if (file_offset < entry->file_offset)
  80			n = n->rb_left;
  81		else if (file_offset >= entry_end(entry))
  82			n = n->rb_right;
  83		else
  84			return n;
  85	}
  86	if (!prev_ret)
  87		return NULL;
  88
  89	while (prev && file_offset >= entry_end(prev_entry)) {
  90		test = rb_next(prev);
  91		if (!test)
  92			break;
  93		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
  94				      rb_node);
  95		if (file_offset < entry_end(prev_entry))
  96			break;
  97
  98		prev = test;
  99	}
 100	if (prev)
 101		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 102				      rb_node);
 103	while (prev && file_offset < entry_end(prev_entry)) {
 104		test = rb_prev(prev);
 105		if (!test)
 106			break;
 107		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 108				      rb_node);
 109		prev = test;
 110	}
 111	*prev_ret = prev;
 112	return NULL;
 113}
 114
 115/*
 116 * helper to check if a given offset is inside a given entry
 117 */
 118static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
 119{
 120	if (file_offset < entry->file_offset ||
 121	    entry->file_offset + entry->len <= file_offset)
 122		return 0;
 123	return 1;
 124}
 125
 126static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 127			  u64 len)
 128{
 129	if (file_offset + len <= entry->file_offset ||
 130	    entry->file_offset + entry->len <= file_offset)
 131		return 0;
 132	return 1;
 133}
 134
 135/*
 136 * look find the first ordered struct that has this offset, otherwise
 137 * the first one less than this offset
 138 */
 139static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 140					  u64 file_offset)
 141{
 142	struct rb_root *root = &tree->tree;
 143	struct rb_node *prev = NULL;
 144	struct rb_node *ret;
 145	struct btrfs_ordered_extent *entry;
 146
 147	if (tree->last) {
 148		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
 149				 rb_node);
 150		if (offset_in_entry(entry, file_offset))
 151			return tree->last;
 152	}
 153	ret = __tree_search(root, file_offset, &prev);
 154	if (!ret)
 155		ret = prev;
 156	if (ret)
 157		tree->last = ret;
 158	return ret;
 159}
 160
 161/* allocate and add a new ordered_extent into the per-inode tree.
 162 * file_offset is the logical offset in the file
 163 *
 164 * start is the disk block number of an extent already reserved in the
 165 * extent allocation tree
 166 *
 167 * len is the length of the extent
 168 *
 169 * The tree is given a single reference on the ordered extent that was
 170 * inserted.
 171 */
 172static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 173				      u64 start, u64 len, u64 disk_len,
 174				      int type, int dio, int compress_type)
 175{
 176	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 177	struct btrfs_root *root = BTRFS_I(inode)->root;
 178	struct btrfs_ordered_inode_tree *tree;
 179	struct rb_node *node;
 180	struct btrfs_ordered_extent *entry;
 
 
 181
 182	tree = &BTRFS_I(inode)->ordered_tree;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 183	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 184	if (!entry)
 185		return -ENOMEM;
 186
 187	entry->file_offset = file_offset;
 188	entry->start = start;
 189	entry->len = len;
 190	entry->disk_len = disk_len;
 191	entry->bytes_left = len;
 192	entry->inode = igrab(inode);
 
 
 193	entry->compress_type = compress_type;
 194	entry->truncated_len = (u64)-1;
 195	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 196		set_bit(type, &entry->flags);
 197
 198	if (dio)
 199		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 200
 201	/* one ref for the tree */
 202	refcount_set(&entry->refs, 1);
 203	init_waitqueue_head(&entry->wait);
 204	INIT_LIST_HEAD(&entry->list);
 
 205	INIT_LIST_HEAD(&entry->root_extent_list);
 206	INIT_LIST_HEAD(&entry->work_list);
 
 207	init_completion(&entry->completion);
 208	INIT_LIST_HEAD(&entry->log_list);
 209	INIT_LIST_HEAD(&entry->trans_list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 210
 211	trace_btrfs_ordered_extent_add(inode, entry);
 212
 213	spin_lock_irq(&tree->lock);
 214	node = tree_insert(&tree->tree, file_offset,
 
 
 
 
 
 
 215			   &entry->rb_node);
 216	if (node)
 217		ordered_data_tree_panic(inode, -EEXIST, file_offset);
 218	spin_unlock_irq(&tree->lock);
 
 
 219
 220	spin_lock(&root->ordered_extent_lock);
 221	list_add_tail(&entry->root_extent_list,
 222		      &root->ordered_extents);
 223	root->nr_ordered_extents++;
 224	if (root->nr_ordered_extents == 1) {
 225		spin_lock(&fs_info->ordered_root_lock);
 226		BUG_ON(!list_empty(&root->ordered_root));
 227		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
 228		spin_unlock(&fs_info->ordered_root_lock);
 229	}
 230	spin_unlock(&root->ordered_extent_lock);
 231
 232	/*
 233	 * We don't need the count_max_extents here, we can assume that all of
 234	 * that work has been done at higher layers, so this is truly the
 235	 * smallest the extent is going to get.
 236	 */
 237	spin_lock(&BTRFS_I(inode)->lock);
 238	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
 239	spin_unlock(&BTRFS_I(inode)->lock);
 240
 241	return 0;
 242}
 243
 244int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 245			     u64 start, u64 len, u64 disk_len, int type)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 246{
 247	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 248					  disk_len, type, 0,
 249					  BTRFS_COMPRESS_NONE);
 250}
 251
 252int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
 253				 u64 start, u64 len, u64 disk_len, int type)
 254{
 255	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 256					  disk_len, type, 1,
 257					  BTRFS_COMPRESS_NONE);
 258}
 259
 260int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
 261				      u64 start, u64 len, u64 disk_len,
 262				      int type, int compress_type)
 263{
 264	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 265					  disk_len, type, 0,
 266					  compress_type);
 267}
 268
 269/*
 270 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 271 * when an ordered extent is finished.  If the list covers more than one
 272 * ordered extent, it is split across multiples.
 273 */
 274void btrfs_add_ordered_sum(struct inode *inode,
 275			   struct btrfs_ordered_extent *entry,
 276			   struct btrfs_ordered_sum *sum)
 277{
 278	struct btrfs_ordered_inode_tree *tree;
 279
 280	tree = &BTRFS_I(inode)->ordered_tree;
 281	spin_lock_irq(&tree->lock);
 282	list_add_tail(&sum->list, &entry->list);
 283	spin_unlock_irq(&tree->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 284}
 285
 286/*
 287 * this is used to account for finished IO across a given range
 288 * of the file.  The IO may span ordered extents.  If
 289 * a given ordered_extent is completely done, 1 is returned, otherwise
 290 * 0.
 291 *
 292 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 293 * to make sure this function only returns 1 once for a given ordered extent.
 
 
 
 
 294 *
 295 * file_offset is updated to one byte past the range that is recorded as
 296 * complete.  This allows you to walk forward in the file.
 297 */
 298int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 299				   struct btrfs_ordered_extent **cached,
 300				   u64 *file_offset, u64 io_size, int uptodate)
 301{
 302	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 303	struct btrfs_ordered_inode_tree *tree;
 304	struct rb_node *node;
 305	struct btrfs_ordered_extent *entry = NULL;
 306	int ret;
 307	unsigned long flags;
 308	u64 dec_end;
 309	u64 dec_start;
 310	u64 to_dec;
 311
 312	tree = &BTRFS_I(inode)->ordered_tree;
 313	spin_lock_irqsave(&tree->lock, flags);
 314	node = tree_search(tree, *file_offset);
 315	if (!node) {
 316		ret = 1;
 317		goto out;
 318	}
 
 
 
 
 
 319
 320	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 321	if (!offset_in_entry(entry, *file_offset)) {
 322		ret = 1;
 323		goto out;
 324	}
 
 
 
 
 
 
 
 
 
 325
 326	dec_start = max(*file_offset, entry->file_offset);
 327	dec_end = min(*file_offset + io_size, entry->file_offset +
 328		      entry->len);
 329	*file_offset = dec_end;
 330	if (dec_start > dec_end) {
 331		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
 332			   dec_start, dec_end);
 333	}
 334	to_dec = dec_end - dec_start;
 335	if (to_dec > entry->bytes_left) {
 336		btrfs_crit(fs_info,
 337			   "bad ordered accounting left %llu size %llu",
 338			   entry->bytes_left, to_dec);
 339	}
 340	entry->bytes_left -= to_dec;
 341	if (!uptodate)
 342		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 343
 344	if (entry->bytes_left == 0) {
 345		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 346		/*
 347		 * Implicit memory barrier after test_and_set_bit
 
 
 
 
 348		 */
 349		if (waitqueue_active(&entry->wait))
 350			wake_up(&entry->wait);
 351	} else {
 352		ret = 1;
 
 
 
 
 
 
 
 353	}
 354out:
 355	if (!ret && cached && entry) {
 356		*cached = entry;
 357		refcount_inc(&entry->refs);
 358	}
 359	spin_unlock_irqrestore(&tree->lock, flags);
 360	return ret == 0;
 361}
 362
 363/*
 364 * this is used to account for finished IO across a given range
 365 * of the file.  The IO should not span ordered extents.  If
 366 * a given ordered_extent is completely done, 1 is returned, otherwise
 367 * 0.
 
 
 
 
 368 *
 369 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 370 * to make sure this function only returns 1 once for a given ordered extent.
 
 
 
 
 371 */
 372int btrfs_dec_test_ordered_pending(struct inode *inode,
 373				   struct btrfs_ordered_extent **cached,
 374				   u64 file_offset, u64 io_size, int uptodate)
 375{
 376	struct btrfs_ordered_inode_tree *tree;
 377	struct rb_node *node;
 378	struct btrfs_ordered_extent *entry = NULL;
 379	unsigned long flags;
 380	int ret;
 381
 382	tree = &BTRFS_I(inode)->ordered_tree;
 383	spin_lock_irqsave(&tree->lock, flags);
 384	if (cached && *cached) {
 385		entry = *cached;
 386		goto have_entry;
 387	}
 388
 389	node = tree_search(tree, file_offset);
 390	if (!node) {
 391		ret = 1;
 392		goto out;
 393	}
 394
 395	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 396have_entry:
 397	if (!offset_in_entry(entry, file_offset)) {
 398		ret = 1;
 399		goto out;
 400	}
 401
 402	if (io_size > entry->bytes_left) {
 403		btrfs_crit(BTRFS_I(inode)->root->fs_info,
 404			   "bad ordered accounting left %llu size %llu",
 405		       entry->bytes_left, io_size);
 406	}
 407	entry->bytes_left -= io_size;
 408	if (!uptodate)
 409		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 410
 411	if (entry->bytes_left == 0) {
 412		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 413		/*
 414		 * Implicit memory barrier after test_and_set_bit
 
 415		 */
 416		if (waitqueue_active(&entry->wait))
 417			wake_up(&entry->wait);
 418	} else {
 419		ret = 1;
 420	}
 421out:
 422	if (!ret && cached && entry) {
 423		*cached = entry;
 424		refcount_inc(&entry->refs);
 
 425	}
 426	spin_unlock_irqrestore(&tree->lock, flags);
 427	return ret == 0;
 428}
 429
 430/* Needs to either be called under a log transaction or the log_mutex */
 431void btrfs_get_logged_extents(struct btrfs_inode *inode,
 432			      struct list_head *logged_list,
 433			      const loff_t start,
 434			      const loff_t end)
 435{
 436	struct btrfs_ordered_inode_tree *tree;
 437	struct btrfs_ordered_extent *ordered;
 438	struct rb_node *n;
 439	struct rb_node *prev;
 440
 441	tree = &inode->ordered_tree;
 442	spin_lock_irq(&tree->lock);
 443	n = __tree_search(&tree->tree, end, &prev);
 444	if (!n)
 445		n = prev;
 446	for (; n; n = rb_prev(n)) {
 447		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 448		if (ordered->file_offset > end)
 449			continue;
 450		if (entry_end(ordered) <= start)
 451			break;
 452		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 453			continue;
 454		list_add(&ordered->log_list, logged_list);
 455		refcount_inc(&ordered->refs);
 456	}
 457	spin_unlock_irq(&tree->lock);
 458}
 459
 460void btrfs_put_logged_extents(struct list_head *logged_list)
 461{
 462	struct btrfs_ordered_extent *ordered;
 463
 464	while (!list_empty(logged_list)) {
 465		ordered = list_first_entry(logged_list,
 466					   struct btrfs_ordered_extent,
 467					   log_list);
 468		list_del_init(&ordered->log_list);
 469		btrfs_put_ordered_extent(ordered);
 470	}
 471}
 472
 473void btrfs_submit_logged_extents(struct list_head *logged_list,
 474				 struct btrfs_root *log)
 475{
 476	int index = log->log_transid % 2;
 477
 478	spin_lock_irq(&log->log_extents_lock[index]);
 479	list_splice_tail(logged_list, &log->logged_list[index]);
 480	spin_unlock_irq(&log->log_extents_lock[index]);
 481}
 482
 483void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
 484			       struct btrfs_root *log, u64 transid)
 485{
 486	struct btrfs_ordered_extent *ordered;
 487	int index = transid % 2;
 488
 489	spin_lock_irq(&log->log_extents_lock[index]);
 490	while (!list_empty(&log->logged_list[index])) {
 491		struct inode *inode;
 492		ordered = list_first_entry(&log->logged_list[index],
 493					   struct btrfs_ordered_extent,
 494					   log_list);
 495		list_del_init(&ordered->log_list);
 496		inode = ordered->inode;
 497		spin_unlock_irq(&log->log_extents_lock[index]);
 498
 499		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
 500		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
 501			u64 start = ordered->file_offset;
 502			u64 end = ordered->file_offset + ordered->len - 1;
 503
 504			WARN_ON(!inode);
 505			filemap_fdatawrite_range(inode->i_mapping, start, end);
 506		}
 507		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
 508						   &ordered->flags));
 509
 510		/*
 511		 * In order to keep us from losing our ordered extent
 512		 * information when committing the transaction we have to make
 513		 * sure that any logged extents are completed when we go to
 514		 * commit the transaction.  To do this we simply increase the
 515		 * current transaction's pending_ordered counter and decrement
 516		 * it when the ordered extent completes.
 517		 */
 518		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
 519			struct btrfs_ordered_inode_tree *tree;
 520
 521			tree = &BTRFS_I(inode)->ordered_tree;
 522			spin_lock_irq(&tree->lock);
 523			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
 524				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
 525				atomic_inc(&trans->transaction->pending_ordered);
 526			}
 527			spin_unlock_irq(&tree->lock);
 528		}
 529		btrfs_put_ordered_extent(ordered);
 530		spin_lock_irq(&log->log_extents_lock[index]);
 531	}
 532	spin_unlock_irq(&log->log_extents_lock[index]);
 533}
 534
 535void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
 536{
 537	struct btrfs_ordered_extent *ordered;
 538	int index = transid % 2;
 539
 540	spin_lock_irq(&log->log_extents_lock[index]);
 541	while (!list_empty(&log->logged_list[index])) {
 542		ordered = list_first_entry(&log->logged_list[index],
 543					   struct btrfs_ordered_extent,
 544					   log_list);
 545		list_del_init(&ordered->log_list);
 546		spin_unlock_irq(&log->log_extents_lock[index]);
 547		btrfs_put_ordered_extent(ordered);
 548		spin_lock_irq(&log->log_extents_lock[index]);
 549	}
 550	spin_unlock_irq(&log->log_extents_lock[index]);
 551}
 552
 553/*
 554 * used to drop a reference on an ordered extent.  This will free
 555 * the extent if the last reference is dropped
 556 */
 557void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 558{
 559	struct list_head *cur;
 560	struct btrfs_ordered_sum *sum;
 561
 562	trace_btrfs_ordered_extent_put(entry->inode, entry);
 563
 564	if (refcount_dec_and_test(&entry->refs)) {
 565		ASSERT(list_empty(&entry->log_list));
 566		ASSERT(list_empty(&entry->trans_list));
 567		ASSERT(list_empty(&entry->root_extent_list));
 568		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
 569		if (entry->inode)
 570			btrfs_add_delayed_iput(entry->inode);
 571		while (!list_empty(&entry->list)) {
 572			cur = entry->list.next;
 573			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 574			list_del(&sum->list);
 575			kfree(sum);
 576		}
 577		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 578	}
 579}
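
/*
 * Example: the reference discipline for the lookup helpers further down in
 * this file, as a sketch.  Every lookup that returns a non-NULL ordered
 * extent has taken a reference which must be dropped here; the helper name
 * is hypothetical and the prototype is assumed to come from ordered-data.h.
 */
static bool example_offset_is_ordered(struct inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return false;
	/* ...inspect ordered->file_offset, ordered->len, ordered->flags... */
	btrfs_put_ordered_extent(ordered);
	return true;
}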
 580
 581/*
 582 * Remove an ordered extent from the tree.  No references are dropped,
 583 * but any waiters are woken up.
 584 */
 585void btrfs_remove_ordered_extent(struct inode *inode,
 586				 struct btrfs_ordered_extent *entry)
 587{
 588	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 589	struct btrfs_ordered_inode_tree *tree;
 590	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 591	struct btrfs_root *root = btrfs_inode->root;
 592	struct rb_node *node;
 593	bool dec_pending_ordered = false;
 594
 595	/* This is paired with btrfs_add_ordered_extent. */
 596	spin_lock(&btrfs_inode->lock);
 597	btrfs_mod_outstanding_extents(btrfs_inode, -1);
 598	spin_unlock(&btrfs_inode->lock);
 599	if (root != fs_info->tree_root)
 600		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);
 601
 602	tree = &btrfs_inode->ordered_tree;
 603	spin_lock_irq(&tree->lock);
 604	node = &entry->rb_node;
 605	rb_erase(node, &tree->tree);
 606	RB_CLEAR_NODE(node);
 607	if (tree->last == node)
 608		tree->last = NULL;
 609	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 610	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
 611		dec_pending_ordered = true;
 612	spin_unlock_irq(&tree->lock);
 613
 614	/*
 615	 * The current running transaction is waiting on us; we need to let
 616	 * it know that we're complete and wake it up.
 617	 */
 618	if (dec_pending_ordered) {
 619		struct btrfs_transaction *trans;
 620
 621		/*
 622		 * The checks for trans are just a formality: it should be
 623		 * set, but if it isn't we don't want to deref/assert under
 624		 * the spin lock.  So be nice and check if trans is set, but
 625		 * ASSERT() so that a developer will notice if it isn't.
 626		 */
 627		spin_lock(&fs_info->trans_lock);
 628		trans = fs_info->running_transaction;
 629		if (trans)
 630			refcount_inc(&trans->use_count);
 631		spin_unlock(&fs_info->trans_lock);
 632
 633		ASSERT(trans);
 634		if (trans) {
 635			if (atomic_dec_and_test(&trans->pending_ordered))
 636				wake_up(&trans->pending_wait);
 637			btrfs_put_transaction(trans);
 638		}
 639	}
 640
 641	spin_lock(&root->ordered_extent_lock);
 642	list_del_init(&entry->root_extent_list);
 643	root->nr_ordered_extents--;
 644
 645	trace_btrfs_ordered_extent_remove(inode, entry);
 646
 647	if (!root->nr_ordered_extents) {
 648		spin_lock(&fs_info->ordered_root_lock);
 649		BUG_ON(list_empty(&root->ordered_root));
 650		list_del_init(&root->ordered_root);
 651		spin_unlock(&fs_info->ordered_root_lock);
 652	}
 653	spin_unlock(&root->ordered_extent_lock);
 654	wake_up(&entry->wait);
 655}
 656
 657static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 658{
 659	struct btrfs_ordered_extent *ordered;
 660
 661	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 662	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
 663	complete(&ordered->completion);
 664}
 665
 666/*
 667 * wait for all the ordered extents in a root.  This is done when balancing
 668 * space between drives.
 669 */
 670u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 671			       const u64 range_start, const u64 range_len)
 672{
 673	struct btrfs_fs_info *fs_info = root->fs_info;
 674	LIST_HEAD(splice);
 675	LIST_HEAD(skipped);
 676	LIST_HEAD(works);
 677	struct btrfs_ordered_extent *ordered, *next;
 678	u64 count = 0;
 679	const u64 range_end = range_start + range_len;
 680
 681	mutex_lock(&root->ordered_extent_mutex);
 682	spin_lock(&root->ordered_extent_lock);
 683	list_splice_init(&root->ordered_extents, &splice);
 684	while (!list_empty(&splice) && nr) {
 685		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 686					   root_extent_list);
 687
 688		if (range_end <= ordered->start ||
 689		    ordered->start + ordered->disk_len <= range_start) {
 690			list_move_tail(&ordered->root_extent_list, &skipped);
 691			cond_resched_lock(&root->ordered_extent_lock);
 692			continue;
 693		}
 694
 695		list_move_tail(&ordered->root_extent_list,
 696			       &root->ordered_extents);
 697		refcount_inc(&ordered->refs);
 698		spin_unlock(&root->ordered_extent_lock);
 699
 700		btrfs_init_work(&ordered->flush_work,
 701				btrfs_flush_delalloc_helper,
 702				btrfs_run_ordered_extent_work, NULL, NULL);
 703		list_add_tail(&ordered->work_list, &works);
 704		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 705
 706		cond_resched();
 707		spin_lock(&root->ordered_extent_lock);
 708		if (nr != U64_MAX)
 709			nr--;
 710		count++;
 711	}
 712	list_splice_tail(&skipped, &root->ordered_extents);
 713	list_splice_tail(&splice, &root->ordered_extents);
 714	spin_unlock(&root->ordered_extent_lock);
 715
 716	list_for_each_entry_safe(ordered, next, &works, work_list) {
 717		list_del_init(&ordered->work_list);
 718		wait_for_completion(&ordered->completion);
 719		btrfs_put_ordered_extent(ordered);
 720		cond_resched();
 721	}
 722	mutex_unlock(&root->ordered_extent_mutex);
 723
 724	return count;
 725}
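
/*
 * Example: a balance-like caller flushing every ordered extent of one root,
 * as a sketch.  nr == U64_MAX means "no limit", and the range starting at 0
 * with length (u64)-1 covers the whole file address space; the helper name
 * is hypothetical.
 */
static u64 example_flush_root_ordered(struct btrfs_root *root)
{
	return btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
}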
 726
 727u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 728			     const u64 range_start, const u64 range_len)
 729{
 730	struct btrfs_root *root;
 731	struct list_head splice;
 732	u64 total_done = 0;
 733	u64 done;
 734
 735	INIT_LIST_HEAD(&splice);
 736
 737	mutex_lock(&fs_info->ordered_operations_mutex);
 738	spin_lock(&fs_info->ordered_root_lock);
 739	list_splice_init(&fs_info->ordered_roots, &splice);
 740	while (!list_empty(&splice) && nr) {
 741		root = list_first_entry(&splice, struct btrfs_root,
 742					ordered_root);
 743		root = btrfs_grab_fs_root(root);
 744		BUG_ON(!root);
 745		list_move_tail(&root->ordered_root,
 746			       &fs_info->ordered_roots);
 747		spin_unlock(&fs_info->ordered_root_lock);
 748
 749		done = btrfs_wait_ordered_extents(root, nr,
 750						  range_start, range_len);
 751		btrfs_put_fs_root(root);
 752		total_done += done;
 753
 754		spin_lock(&fs_info->ordered_root_lock);
 755		if (nr != U64_MAX) {
 756			nr -= done;
 757		}
 758	}
 759	list_splice_tail(&splice, &fs_info->ordered_roots);
 760	spin_unlock(&fs_info->ordered_root_lock);
 761	mutex_unlock(&fs_info->ordered_operations_mutex);
 762
 763	return total_done;
 764}
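
/*
 * Example: the fs-wide variant, as a sketch for a caller that only needs
 * to make some progress (e.g. when short on space): wait for up to @nr
 * ordered extents across all roots.  The helper name is hypothetical.
 */
static u64 example_flush_some_ordered(struct btrfs_fs_info *fs_info, u64 nr)
{
	return btrfs_wait_ordered_roots(fs_info, nr, 0, (u64)-1);
}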
 765
 766/*
 767 * Used to start IO or wait for a given ordered extent to finish.
 768 *
 769 * If wait is set, this effectively waits on page writeback for all the
 770 * pages in the extent, and it waits on the io completion code to insert
 771 * metadata into the btree corresponding to the extent.
 772 */
 773void btrfs_start_ordered_extent(struct inode *inode,
 774				       struct btrfs_ordered_extent *entry,
 775				       int wait)
 776{
 777	u64 start = entry->file_offset;
 778	u64 end = start + entry->len - 1;
 779
 780	trace_btrfs_ordered_extent_start(inode, entry);
 781
 782	/*
 783	 * Pages in the range can be dirty, clean or writeback.  We start
 784	 * IO on any dirty ones so the wait doesn't stall waiting for the
 785	 * flusher thread to find them.
 786	 */
 787	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 788		filemap_fdatawrite_range(inode->i_mapping, start, end);
 789	if (wait) {
 790		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 791						 &entry->flags));
 792	}
 793}
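
/*
 * Example: synchronously waiting out a single ordered extent that covers
 * @offset, as a sketch.  This mirrors the lookup/start/put pattern used by
 * btrfs_wait_ordered_range() below; the helper name is hypothetical.
 */
static void example_wait_one_ordered(struct inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return;
	btrfs_start_ordered_extent(inode, ordered, 1);	/* wait == 1 */
	btrfs_put_ordered_extent(ordered);
}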
 794
 795/*
 796 * Used to wait on ordered extents across a large range of bytes.
 797 */
 798int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 799{
 800	int ret = 0;
 801	int ret_wb = 0;
 802	u64 end;
 803	u64 orig_end;
 804	struct btrfs_ordered_extent *ordered;
 805
 806	if (start + len < start) {
 807		orig_end = INT_LIMIT(loff_t);
 808	} else {
 809		orig_end = start + len - 1;
 810		if (orig_end > INT_LIMIT(loff_t))
 811			orig_end = INT_LIMIT(loff_t);
 812	}
 813
 814	/*
 815	 * Start IO across the range first to instantiate any delalloc extents.
 816	 */
 817	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 818	if (ret)
 819		return ret;
 820
 821	/*
 822	 * If we have a writeback error don't return immediately. Wait first
 823	 * for any ordered extents that haven't completed yet. This is to make
 824	 * sure no one can dirty the same page ranges and call writepages()
 825	 * before the ordered extents complete - to avoid failures (-EEXIST)
 826	 * when adding the new ordered extents to the ordered tree.
 827	 */
 828	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 829
 830	end = orig_end;
 831	while (1) {
 832		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 833		if (!ordered)
 834			break;
 835		if (ordered->file_offset > orig_end) {
 836			btrfs_put_ordered_extent(ordered);
 837			break;
 838		}
 839		if (ordered->file_offset + ordered->len <= start) {
 840			btrfs_put_ordered_extent(ordered);
 841			break;
 842		}
 843		btrfs_start_ordered_extent(inode, ordered, 1);
 844		end = ordered->file_offset;
 845		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 846			ret = -EIO;
 847		btrfs_put_ordered_extent(ordered);
 848		if (ret || end == 0 || end == start)
 849			break;
 850		end--;
 851	}
 852	return ret_wb ? ret_wb : ret;
 853}
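
/*
 * Example: an fsync-like caller, as a sketch.  Flush and wait for all
 * ordered extents between two inclusive file offsets; the helper name is
 * hypothetical and the length math assumes @end >= @start.
 */
static int example_sync_range(struct inode *inode, u64 start, u64 end)
{
	return btrfs_wait_ordered_range(inode, start, end - start + 1);
}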
 854
 855/*
 856 * find an ordered extent corresponding to file_offset.  return NULL if
 857 * nothing is found, otherwise take a reference on the extent and return it
 858 */
 859struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 860							 u64 file_offset)
 861{
 862	struct btrfs_ordered_inode_tree *tree;
 863	struct rb_node *node;
 864	struct btrfs_ordered_extent *entry = NULL;
 865
 866	tree = &BTRFS_I(inode)->ordered_tree;
 867	spin_lock_irq(&tree->lock);
 868	node = tree_search(tree, file_offset);
 869	if (!node)
 870		goto out;
 871
 872	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 873	if (!offset_in_entry(entry, file_offset))
 874		entry = NULL;
 875	if (entry)
 876		refcount_inc(&entry->refs);
 877out:
 878	spin_unlock_irq(&tree->lock);
 879	return entry;
 880}
 881
 882/* Since the DIO code tries to lock a wide area we need to look for any ordered
 883 * extents that exist in the range, rather than just the start of the range.
 884 */
 885struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 886		struct btrfs_inode *inode, u64 file_offset, u64 len)
 887{
 888	struct btrfs_ordered_inode_tree *tree;
 889	struct rb_node *node;
 890	struct btrfs_ordered_extent *entry = NULL;
 891
 892	tree = &inode->ordered_tree;
 893	spin_lock_irq(&tree->lock);
 894	node = tree_search(tree, file_offset);
 895	if (!node) {
 896		node = tree_search(tree, file_offset + len);
 897		if (!node)
 898			goto out;
 899	}
 900
 901	while (1) {
 902		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 903		if (range_overlaps(entry, file_offset, len))
 904			break;
 905
 906		if (entry->file_offset >= file_offset + len) {
 907			entry = NULL;
 908			break;
 909		}
 910		entry = NULL;
 911		node = rb_next(node);
 912		if (!node)
 913			break;
 914	}
 915out:
 916	if (entry)
 917		refcount_inc(&entry->refs);
 918	spin_unlock_irq(&tree->lock);
 919	return entry;
 920}
 921
 922bool btrfs_have_ordered_extents_in_range(struct inode *inode,
 923					 u64 file_offset,
 924					 u64 len)
 925{
 926	struct btrfs_ordered_extent *oe;
 927
 928	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
 929	if (oe) {
 930		btrfs_put_ordered_extent(oe);
 931		return true;
 932	}
 933	return false;
 934}
 935
 936/*
 937 * Look up and return any extent before 'file_offset'.  NULL is returned
 938 * if none is found.
 939 */
 940struct btrfs_ordered_extent *
 941btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 942{
 943	struct btrfs_ordered_inode_tree *tree;
 944	struct rb_node *node;
 945	struct btrfs_ordered_extent *entry = NULL;
 946
 947	tree = &BTRFS_I(inode)->ordered_tree;
 948	spin_lock_irq(&tree->lock);
 949	node = tree_search(tree, file_offset);
 950	if (!node)
 951		goto out;
 952
 953	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 954	refcount_inc(&entry->refs);
 955out:
 956	spin_unlock_irq(&tree->lock);
 957	return entry;
 958}
 959
 960/*
 961 * After an extent is done, call this to conditionally update the on-disk
 962 * i_size.  i_size is updated to cover any fully written part of the file.
 963 */
 964int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 965				struct btrfs_ordered_extent *ordered)
 966{
 967	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 968	u64 disk_i_size;
 969	u64 new_i_size;
 970	u64 i_size = i_size_read(inode);
 971	struct rb_node *node;
 972	struct rb_node *prev = NULL;
 973	struct btrfs_ordered_extent *test;
 974	int ret = 1;
 975	u64 orig_offset = offset;
 976
 977	spin_lock_irq(&tree->lock);
 978	if (ordered) {
 979		offset = entry_end(ordered);
 980		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
 981			offset = min(offset,
 982				     ordered->file_offset +
 983				     ordered->truncated_len);
 984	} else {
 985		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
 986	}
 987	disk_i_size = BTRFS_I(inode)->disk_i_size;
 988
 989	/*
 990	 * Truncate file.
 991	 *
 992	 * If ordered is not NULL, then this is called from endio and
 993	 * disk_i_size will be updated by either truncate itself or any
 994	 * in-flight IOs which are inside the disk_i_size.
 995	 *
 996	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
 997	 * fails somehow, we need to make sure we have a precise disk_i_size
 998	 * by updating it as usual.
 999	 */
1000	if (!ordered && disk_i_size > i_size) {
1001		BTRFS_I(inode)->disk_i_size = orig_offset;
1002		ret = 0;
1003		goto out;
1004	}
1005
1006	/*
1007	 * if the disk i_size is already at the inode->i_size, or
1008	 * this ordered extent is inside the disk i_size, we're done
1009	 */
1010	if (disk_i_size == i_size)
1011		goto out;
1012
1013	/*
1014	 * We still need to update disk_i_size if outstanding_isize is greater
1015	 * than disk_i_size.
1016	 */
1017	if (offset <= disk_i_size &&
1018	    (!ordered || ordered->outstanding_isize <= disk_i_size))
1019		goto out;
1020
1021	/*
1022	 * Walk backward from this ordered extent to disk_i_size.
1023	 * If we find another ordered extent in between, we can't update
1024	 * the disk i_size yet.
1025	 */
1026	if (ordered) {
1027		node = rb_prev(&ordered->rb_node);
1028	} else {
1029		prev = tree_search(tree, offset);
1030		/*
1031		 * we insert file extents without involving the ordered struct,
1032		 * so there should be no ordered struct covering this offset
1033		 */
1034		if (prev) {
1035			test = rb_entry(prev, struct btrfs_ordered_extent,
1036					rb_node);
1037			BUG_ON(offset_in_entry(test, offset));
1038		}
1039		node = prev;
1040	}
1041	for (; node; node = rb_prev(node)) {
1042		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
1043
1044		/* We treat this entry as if it doesn't exist */
1045		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
1046			continue;
1047
1048		if (entry_end(test) <= disk_i_size)
1049			break;
1050		if (test->file_offset >= i_size)
1051			break;
1052
1053		/*
1054		 * We don't update disk_i_size now, so record this not yet
1055		 * applied i_size, or we would lose track of the real i_size.
1056		 */
1057		if (test->outstanding_isize < offset)
1058			test->outstanding_isize = offset;
1059		if (ordered &&
1060		    ordered->outstanding_isize > test->outstanding_isize)
1061			test->outstanding_isize = ordered->outstanding_isize;
1062		goto out;
1063	}
1064	new_i_size = min_t(u64, offset, i_size);
1065
1066	/*
1067	 * Some ordered extents may have completed before the current one,
1068	 * and we hold the real i_size in ->outstanding_isize.
1069	 */
1070	if (ordered && ordered->outstanding_isize > new_i_size)
1071		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
1072	BTRFS_I(inode)->disk_i_size = new_i_size;
1073	ret = 0;
1074out:
1075	/*
1076	 * We need to do this because we can't remove ordered extents until
1077	 * after the disk_i_size has been updated and then the inode has been
1078	 * updated to reflect the change, so we need to tell anybody who finds
1079	 * this ordered extent that we've already done all the real work; we
1080	 * just haven't completed all the other work.
1081	 */
1082	if (ordered)
1083		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
1084	spin_unlock_irq(&tree->lock);
1085	return ret;
1086}
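
/*
 * Example: the completion-time ordering, as a sketch.  Per the comment at
 * the "out" label above, the on-disk i_size must be settled before the
 * ordered extent is removed from the tree; the helper name is hypothetical
 * and assumes it runs in an endio-style context.
 */
static void example_complete_ordered(struct inode *inode,
				     struct btrfs_ordered_extent *ordered)
{
	/* With a non-NULL ordered the offset argument is ignored. */
	btrfs_ordered_update_i_size(inode, 0, ordered);
	btrfs_remove_ordered_extent(inode, ordered);
	btrfs_put_ordered_extent(ordered);	/* drop the tree's reference */
}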
1087
1088/*
1089 * Search the ordered extents for one corresponding to 'offset' and
1090 * try to find a checksum.  This is used because we allow pages to
1091 * be reclaimed before their checksum is actually put into the btree.
1092 */
1093int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
1094			   u32 *sum, int len)
1095{
1096	struct btrfs_ordered_sum *ordered_sum;
1097	struct btrfs_ordered_extent *ordered;
1098	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
1099	unsigned long num_sectors;
1100	unsigned long i;
1101	u32 sectorsize = btrfs_inode_sectorsize(inode);
1102	int index = 0;
1103
1104	ordered = btrfs_lookup_ordered_extent(inode, offset);
1105	if (!ordered)
1106		return 0;
1107
1108	spin_lock_irq(&tree->lock);
1109	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
1110		if (disk_bytenr >= ordered_sum->bytenr &&
1111		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
1112			i = (disk_bytenr - ordered_sum->bytenr) >>
1113			    inode->i_sb->s_blocksize_bits;
1114			num_sectors = ordered_sum->len >>
1115				      inode->i_sb->s_blocksize_bits;
1116			num_sectors = min_t(int, len - index, num_sectors - i);
1117			memcpy(sum + index, ordered_sum->sums + i,
1118			       num_sectors);
1119
1120			index += (int)num_sectors;
1121			if (index == len)
1122				goto out;
1123			disk_bytenr += num_sectors * sectorsize;
1124		}
1125	}
1126out:
1127	spin_unlock_irq(&tree->lock);
1128	btrfs_put_ordered_extent(ordered);
1129	return index;
1130}
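
/*
 * Example: recovering one checksum for a block that is still tracked by an
 * in-flight ordered extent, as a sketch.  The return value is the number
 * of sums copied (0 or 1 here); the helper name is hypothetical.
 */
static int example_lookup_one_csum(struct inode *inode, u64 offset,
				   u64 disk_bytenr, u32 *csum)
{
	return btrfs_find_ordered_sum(inode, offset, disk_bytenr, csum, 1);
}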
1131
1132int __init ordered_data_init(void)
1133{
1134	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
1135				     sizeof(struct btrfs_ordered_extent), 0,
1136				     SLAB_MEM_SPREAD,
1137				     NULL);
1138	if (!btrfs_ordered_extent_cache)
1139		return -ENOMEM;
1140
1141	return 0;
1142}
1143
1144void __cold ordered_data_exit(void)
1145{
1146	kmem_cache_destroy(btrfs_ordered_extent_cache);
1147}